From 8690d2bf91a7d12fe56b4fb4ea7394f8982e429f Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 11:17:48 +0100
Subject: [PATCH 01/40] Fix generate and submit documentation workflow

---
 .github/workflows/generate-and-submit-documentation.yaml | 1 -
 docker/Dockerfile | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index d1190b26..676163ac 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -6,7 +6,6 @@ on: jobs: generate-documentation: - if: github.event.pull_request.merged == true && github.event.pull_request.base.ref == 'main' && github.event.pull_request.head.ref == 'develop' runs-on: ubuntu-latest steps: - name: Checkout
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ef9efc13..9eaed5aa 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -77,7 +77,7 @@ RUN sage -pip install bitstring==4.0.1 \ pygments==2.12.0 \ sage-package==0.0.7 \ setuptools==59.6.0 \ - sphinx==4.5.0 \ + sphinx==5.0.0 \ sphinxcontrib-bibtex==2.5.0 \ tensorflow==2.13.0 \ pytest==7.2.1 \

From e3738c193513c8acf5b7082e711fb74319ab91d7 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 11:57:14 +0100
Subject: [PATCH 02/40] Change generate-and-submit-documentation.yaml to execute the action in the current branch.

---
 .github/workflows/generate-and-submit-documentation.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 676163ac..f374c919 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -2,7 +2,7 @@ name: Generate and submit documentation on: push: branches: - main + fix/generate-documentation-workflow jobs: generate-documentation:

From b0bd9a79a358feed1fc34a928ffba8fba9210bac Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 12:20:44 +0100
Subject: [PATCH 03/40] Clean Docker images before creating a new image.
---
 .github/workflows/generate-and-submit-documentation.yaml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index f374c919..dedc2df1 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -1,4 +1,5 @@ name: Generate and submit documentation + on: push: branches:
@@ -17,6 +18,9 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 + - name: Clean up Docker images + run: docker system prune -af --volumes + - name: Cache Docker layers uses: actions/cache@v3 with:

From 591063a4ad476841b81b5adfb571f99090bf64d2 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 12:35:28 +0100
Subject: [PATCH 04/40] Free up space on runner

---
 .github/workflows/generate-and-submit-documentation.yaml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index dedc2df1..019af056 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -21,6 +21,13 @@ jobs: - name: Clean up Docker images run: docker system prune -af --volumes + - name: Free up space on runner + run: | + sudo apt-get clean + sudo rm -rf /var/lib/apt/lists/* + sudo rm -rf /tmp/* + sudo rm -rf /var/tmp/* + - name: Cache Docker layers uses: actions/cache@v3 with:

From 3462071794ad7181a257971a0e680bbe0335e74c Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 13:06:29 +0100
Subject: [PATCH 05/40] Disable the use of the docker cache

---
 .../generate-and-submit-documentation.yaml | 27 +------------------
 1 file changed, 1 insertion(+), 26 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 019af056..e172d70f 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,25 +18,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Clean up Docker images - run: docker system prune -af --volumes - - - name: Free up space on runner - run: | - sudo apt-get clean - sudo rm -rf /var/lib/apt/lists/* - sudo rm -rf /tmp/* - sudo rm -rf /var/tmp/* - - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Build + - name: Build Docker image uses: docker/build-push-action@v4 id: built-image with: context: . file: ./docker/Dockerfile push: false load: true tags: claasp-lib - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max - - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache - name: Generate documentation run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-lib make doc

From 3360184a7600355fd75d91f5871432013ece1f59 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 13:32:17 +0100
Subject: [PATCH 06/40] Disable pip cache

---
 .github/workflows/generate-and-submit-documentation.yaml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index e172d70f..bf9c9be8 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -27,6 +27,11 @@ jobs: push: false load: true tags: claasp-lib + no-cache: true + build-args: | + PIP_NO_CACHE_DIR=off + PIP_DISABLE_PIP_VERSION_CHECK=1 + PIP_DEFAULT_TIMEOUT=100 - name: Generate documentation run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-lib make doc

From f2e3a35cf2d60952f0ae2e230920b04d586a3149 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 14:31:36 +0100
Subject: [PATCH 07/40] Upgrade docker build action

---
 .github/workflows/generate-and-submit-documentation.yaml | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index bf9c9be8..895b3533 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -19,7 +19,7 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Build Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 id: built-image with: context: .
@@ -27,11 +27,6 @@ jobs: push: false load: true tags: claasp-lib - no-cache: true - build-args: | - PIP_NO_CACHE_DIR=off - PIP_DISABLE_PIP_VERSION_CHECK=1 - PIP_DEFAULT_TIMEOUT=100 - name: Generate documentation run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-lib make doc

From 4cd328a6928c433c153beddb38d2d80fbd55972d Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 15:13:43 +0100
Subject: [PATCH 08/40] Check free disk space

---
 .github/workflows/generate-and-submit-documentation.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 895b3533..9f7d6a16 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,6 +18,12 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 + - name: Verify free disk space + run: df -h + + - name: Show folders in /root/.cache/ + run: ls -la /root/.cache/ + - name: Build Docker image uses: docker/build-push-action@v5 id: built-image with:

From be70753ebe34579175f95dab391d2d156b2cafc5 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Thu, 23 May 2024 15:25:28 +0100
Subject: [PATCH 09/40] Add target claasp-base when building the docker image.
---
 .../workflows/generate-and-submit-documentation.yaml | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 9f7d6a16..62cc2b0e 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,12 +18,6 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Verify free disk space - run: df -h - - - name: Show folders in /root/.cache/ - run: ls -la /root/.cache/ - - name: Build Docker image uses: docker/build-push-action@v5 id: built-image
@@ -32,10 +26,10 @@ jobs: file: ./docker/Dockerfile push: false load: true - tags: claasp-lib + target: claasp-base - name: Generate documentation - run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-lib make doc + run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-base make doc - name: Commit & Push changes uses: actions-js/push@master

From 22f535f4695fdbab4263c601618bae7aca73ac99 Mon Sep 17 00:00:00 2001
From: Elena Sacramento
Date: Fri, 24 May 2024 07:01:28 +0100
Subject: [PATCH 10/40] Enable the use of the docker cache

---
 .../generate-and-submit-documentation.yaml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 62cc2b0e..7f4e1f43 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,6 +18,14 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - name: Build Docker image uses: docker/build-push-action@v5 id: built-image
@@ -27,6 +35,13 @@ jobs: push: false load: true target: claasp-base + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache - name: Generate documentation run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-base make doc

From a94ef2cf4dbbdc7a06c54207b731149aa31eabe2 Mon Sep 17 00:00:00 2001
From: Elena Sacramento
Date: Fri, 24 May 2024 07:34:23 +0100
Subject: [PATCH 11/40] Revert "Enable the use of the docker cache"

This reverts commit 22f535f4695fdbab4263c601618bae7aca73ac99.
---
 .../generate-and-submit-documentation.yaml | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 7f4e1f43..62cc2b0e 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,14 +18,6 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - name: Build Docker image uses: docker/build-push-action@v5 id: built-image
@@ -35,13 +27,6 @@ jobs: push: false load: true target: claasp-base - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max - - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache - name: Generate documentation run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-base make doc

From b57cdba8f8efe67729738c75a2634a7375b38792 Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Fri, 24 May 2024 08:30:27 +0100
Subject: [PATCH 12/40] Change build image

---
 .../generate-and-submit-documentation.yaml | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 62cc2b0e..3b5cd4f9 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,18 +18,11 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Build Docker image - uses: docker/build-push-action@v5 - id: built-image - with: - context: . - file: ./docker/Dockerfile - push: false - load: true - target: claasp-base + - name: Build the Docker image + run: docker build -f docker/Dockerfile --target claasp-base -t claasp . - name: Generate documentation - run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp-base make doc + run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp make doc - name: Commit & Push changes uses: actions-js/push@master

From b3b08217e2811a81a3383f87d48be2413689130b Mon Sep 17 00:00:00 2001
From: Maria Guerra
Date: Fri, 24 May 2024 08:58:12 +0100
Subject: [PATCH 13/40] Free up space on runner

---
 .../workflows/generate-and-submit-documentation.yaml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 3b5cd4f9..560b3e99 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -18,6 +18,16 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 + - name: Free up space on runner + run: | + docker system prune -af --volumes + sudo apt-get clean + sudo rm -rf /var/lib/apt/lists/* + sudo rm -rf /tmp/* + sudo rm -rf /var/tmp/* + sudo rm -rf $HOME/.cache + df -h + - name: Build the Docker image run: docker build -f docker/Dockerfile --target claasp-base -t claasp .
From d0afff9122f68d3221f765f37efe5ba70b66cb4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ana=20C=C3=A1ceres?=
Date: Thu, 30 May 2024 15:19:27 +0200
Subject: [PATCH 14/40] Run documentation generation on self-hosted runner

---
 .../generate-and-submit-documentation.yaml | 26 ++++---------------
 1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 560b3e99..7a82050d 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -1,5 +1,4 @@ name: Generate and submit documentation - on: push: branches:
@@ -7,7 +6,7 @@ on: jobs: generate-documentation: - runs-on: ubuntu-latest + runs-on: claasp-arc-runnerset steps: - name: Checkout uses: actions/checkout@v3
@@ -15,35 +14,20 @@ jobs: persist-credentials: false fetch-depth: 0 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Free up space on runner - run: | - docker system prune -af --volumes - sudo apt-get clean - sudo rm -rf /var/lib/apt/lists/* - sudo rm -rf /tmp/* - sudo rm -rf /var/tmp/* - sudo rm -rf $HOME/.cache - df -h - - - name: Build the Docker image - run: docker build -f docker/Dockerfile --target claasp-base -t claasp . - - name: Generate documentation - run: docker run --rm -v $PWD:/home/sage/tii-claasp claasp make doc + run: make doc - name: Commit & Push changes uses: actions-js/push@master with: + branch: 'fix/generate-documentation-workflow-test-commit' github_token: ${{ secrets.AUTHORIZATION_TOKEN }} message: "Update documentation" - name: Update develop branch uses: morbalint/git-merge-action@v1 with: - target: 'develop' - source: 'main' + target: 'fix/generate-documentation-workflow-test-merge' + source: 'fix/generate-documentation-workflow-test-commit' token: ${{ secrets.AUTHORIZATION_TOKEN }} strategy_options: 'ours'

From 63d23ebb055e2366369cc86ec28752cae69385de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ana=20C=C3=A1ceres?=
Date: Thu, 30 May 2024 15:28:32 +0200
Subject: [PATCH 15/40] Run documentation generation on self-hosted runner

---
 .github/workflows/generate-and-submit-documentation.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 7a82050d..cfedf3a2 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -6,7 +6,7 @@ jobs: generate-documentation: - runs-on: claasp-arc-runnerset + runs-on: self-hosted steps: - name: Checkout uses: actions/checkout@v3

From 064b41f253741b7beb3eacfd61f0dd83e3754daf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ana=20C=C3=A1ceres?=
Date: Thu, 30 May 2024 16:13:21 +0200
Subject: [PATCH 16/40] Run documentation generation on self-hosted runner

---
 .github/workflows/generate-and-submit-documentation.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index cfedf3a2..b86d433b 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -6,7 +6,7 @@ jobs: generate-documentation: - runs-on: self-hosted + runs-on: self-hosted-k3s steps: - name: Checkout uses: actions/checkout@v3
From 46add6b522cd910a139bc4a9785f5124873a1934 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ana=20C=C3=A1ceres?=
Date: Thu, 30 May 2024 16:24:57 +0200
Subject: [PATCH 17/40] Comment last two steps of documentation generation

---
 .../generate-and-submit-documentation.yaml | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index b86d433b..8cca8d4f 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -17,17 +17,17 @@ jobs: - name: Generate documentation run: make doc - - name: Commit & Push changes - uses: actions-js/push@master - with: - branch: 'fix/generate-documentation-workflow-test-commit' - github_token: ${{ secrets.AUTHORIZATION_TOKEN }} - message: "Update documentation" - - - name: Update develop branch - uses: morbalint/git-merge-action@v1 - with: - target: 'fix/generate-documentation-workflow-test-merge' - source: 'fix/generate-documentation-workflow-test-commit' - token: ${{ secrets.AUTHORIZATION_TOKEN }} - strategy_options: 'ours' +# - name: Commit & Push changes +# uses: actions-js/push@master +# with: +# branch: 'fix/generate-documentation-workflow-test-commit' +# github_token: ${{ secrets.AUTHORIZATION_TOKEN }} +# message: "Update documentation" +# +# - name: Update develop branch +# uses: morbalint/git-merge-action@v1 +# with: +# target: 'fix/generate-documentation-workflow-test-merge' +# source: 'fix/generate-documentation-workflow-test-commit' +# token: ${{ secrets.AUTHORIZATION_TOKEN }} +# strategy_options: 'ours'

From e2bfec8ef8085ca41713d783a6863b26fc3f3b5a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ana=20C=C3=A1ceres?=
Date: Wed, 5 Jun 2024 10:36:14 +0100
Subject: [PATCH 18/40] Remove documentation action update to manual update

---
 .../generate-and-submit-documentation.yaml | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml
index 8cca8d4f..b6160a77 100644
--- a/.github/workflows/generate-and-submit-documentation.yaml
+++ b/.github/workflows/generate-and-submit-documentation.yaml
@@ -17,6 +17,19 @@ jobs: - name: Generate documentation run: make doc + - name: Commit & Push changes + run: | + git clone https://github.com/Crypto-TII/claasp.git claasp-lib + git config --global user.name 'Github' + git config --global user.email ${{ secrets.DEPLOYMENT_REPOSITORY_EMAIL }} + cd claasp-lib + git checkout fix/generate-documentation-workflow-test-commit + make doc + git add .
+ git commit -m "Update documentation" + git push origin fix/generate-documentation-workflow-test-commit + git merge fix/generate-documentation-workflow-test-commit fix/generate-documentation-workflow-test-merge + # - name: Commit & Push changes # uses: actions-js/push@master # with: From 86bffd07cc5e5b91a35b3f3baf98253b9327bc37 Mon Sep 17 00:00:00 2001 From: Elena Sacramento Date: Wed, 5 Jun 2024 10:56:56 +0100 Subject: [PATCH 19/40] Change branch fix/generate-documentation-workflow-test-commit to actual branch --- .../generate-and-submit-documentation.yaml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/generate-and-submit-documentation.yaml b/.github/workflows/generate-and-submit-documentation.yaml index b6160a77..2859e569 100644 --- a/.github/workflows/generate-and-submit-documentation.yaml +++ b/.github/workflows/generate-and-submit-documentation.yaml @@ -17,30 +17,30 @@ jobs: - name: Generate documentation run: make doc +# - name: Commit & Push changes +# run: | +# git clone https://github.com/Crypto-TII/claasp.git claasp-lib +# git config --global user.name 'Github' +# git config --global user.email ${{ secrets.DEPLOYMENT_REPOSITORY_EMAIL }} +# cd claasp-lib +# git checkout fix/generate-documentation-workflow-test-commit +# make doc +# git add . +# git commit -m "Update documentation" +# git push origin fix/generate-documentation-workflow-test-commit +# git merge fix/generate-documentation-workflow-test-commit fix/generate-documentation-workflow-test-merge + - name: Commit & Push changes - run: | - git clone https://github.com/Crypto-TII/claasp.git claasp-lib - git config --global user.name 'Github' - git config --global user.email ${{ secrets.DEPLOYMENT_REPOSITORY_EMAIL }} - cd claasp-lib - git checkout fix/generate-documentation-workflow-test-commit - make doc - git add . 
- git commit -m "Update documentation" - git push origin fix/generate-documentation-workflow-test-commit - git merge fix/generate-documentation-workflow-test-commit fix/generate-documentation-workflow-test-merge + uses: actions-js/push@master + with: + branch: 'fix/generate-documentation-workflow' + github_token: ${{ secrets.AUTHORIZATION_TOKEN }} + message: "Update documentation" -# - name: Commit & Push changes -# uses: actions-js/push@master -# with: -# branch: 'fix/generate-documentation-workflow-test-commit' -# github_token: ${{ secrets.AUTHORIZATION_TOKEN }} -# message: "Update documentation" -# -# - name: Update develop branch -# uses: morbalint/git-merge-action@v1 -# with: -# target: 'fix/generate-documentation-workflow-test-merge' -# source: 'fix/generate-documentation-workflow-test-commit' -# token: ${{ secrets.AUTHORIZATION_TOKEN }} -# strategy_options: 'ours' + - name: Update develop branch + uses: morbalint/git-merge-action@v1 + with: + target: 'fix/generate-documentation-workflow-test-merge' + source: 'fix/generate-documentation-workflow' + token: ${{ secrets.AUTHORIZATION_TOKEN }} + strategy_options: 'ours' From 6798010553c023ed7a827db435038ab0e0c77f1a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 5 Jun 2024 09:59:02 +0000 Subject: [PATCH 20/40] Update documentation --- docs/build/html/.buildinfo | 2 +- .../continuous_diffusion_analysis.rst.txt | 10 + ..._impossible_xor_differential_model.rst.txt | 10 + .../cp/minizinc_utils/usefulfunctions.rst.txt | 10 + .../cipher_modules/models/cp/solvers.rst.txt | 10 + .../models/milp/solvers.rst.txt | 10 + .../milp/utils/milp_truncated_utils.rst.txt | 10 + .../minizinc_boomerang_model.rst.txt | 10 + .../minizinc/utils/mzn_bct_predicates.rst.txt | 10 + .../models/minizinc/utils/utils.rst.txt | 10 + ...c_truncated_xor_differential_model.rst.txt | 10 + ...c_truncated_xor_differential_model.rst.txt | 10 + .../cipher_modules/models/sat/solvers.rst.txt | 10 + .../cipher_modules/models/smt/solvers.rst.txt | 10 + .../_sources/cipher_modules/report.rst.txt | 10 + ...armav2_with_mixcolumn_block_cipher.rst.txt | 10 + .../block_ciphers/scarf_block_cipher.rst.txt | 10 + .../block_ciphers/speedy_block_cipher.rst.txt | 10 + .../permutations/gaston_permutation.rst.txt | 10 + .../gaston_sbox_permutation.rst.txt | 10 + .../stream_ciphers/a5_2_stream_cipher.rst.txt | 10 + docs/build/html/_sources/index.rst.txt | 397 +- docs/build/html/_sources/references.rst.txt | 20 + .../_sphinx_javascript_frameworks_compat.js | 134 + docs/build/html/_static/basic.css | 34 +- docs/build/html/_static/classic.css | 1 + docs/build/html/_static/doctools.js | 480 +- .../html/_static/documentation_options.js | 6 +- docs/build/html/_static/jquery-3.6.0.js | 10881 ++++++++++++++++ docs/build/html/_static/jquery.js | 4 +- docs/build/html/_static/language_data.js | 100 +- docs/build/html/_static/searchtools.js | 784 +- docs/build/html/_static/sidebar.js | 155 +- docs/build/html/cipher.html | 593 +- .../html/cipher_modules/algebraic_tests.html | 76 +- .../html/cipher_modules/avalanche_tests.html | 302 +- .../html/cipher_modules/code_generator.html | 40 +- .../component_analysis_tests.html | 404 +- .../continuous_diffusion_analysis.html | 336 + docs/build/html/cipher_modules/evaluator.html | 35 +- .../generic_bit_based_c_functions.html | 33 +- .../cipher_modules/generic_functions.html | 33 +- ...nctions_continuous_diffusion_analysis.html | 33 +- .../generic_functions_vectorized_bit.html | 35 +- .../generic_functions_vectorized_byte.html | 135 +- 
.../generic_word_based_c_functions.html | 33 +- .../html/cipher_modules/graph_generator.html | 33 +- .../html/cipher_modules/inverse_cipher.html | 35 +- .../models/algebraic/algebraic_model.html | 85 +- .../algebraic/boolean_polynomial_ring.html | 33 +- .../models/algebraic/constraints.html | 33 +- .../cipher_modules/models/cp/cp_model.html | 44 +- .../models/cp/cp_models/cp_cipher_model.html | 53 +- ...stic_truncated_xor_differential_model.html | 60 +- .../cp_impossible_xor_differential_model.html | 883 ++ .../cp_models/cp_xor_differential_model.html | 238 +- ...rential_number_of_active_sboxes_model.html | 46 +- ..._fixing_number_of_active_sboxes_model.html | 107 +- .../cp/cp_models/cp_xor_linear_model.html | 169 +- .../cp/minizinc_utils/usefulfunctions.html | 221 + .../cipher_modules/models/cp/solvers.html | 221 + .../models/milp/milp_model.html | 48 +- ...stic_truncated_xor_differential_model.html | 51 +- ...ise_impossible_xor_differential_model.html | 80 +- .../milp/milp_models/milp_cipher_model.html | 33 +- ...stic_truncated_xor_differential_model.html | 51 +- ...ise_impossible_xor_differential_model.html | 64 +- .../milp_xor_differential_model.html | 210 +- .../milp_models/milp_xor_linear_model.html | 181 +- .../cipher_modules/models/milp/solvers.html | 221 + .../milp/tmp/tea_cipher_xordiff_model.html | 33 +- ..._truncated_input_pattern_inequalities.html | 33 +- ...containing_truncated_mds_inequalities.html | 23 +- ...xor_inequalities_between_n_input_bits.html | 33 +- ...xor_inequalities_between_n_input_bits.html | 33 +- ...ontains_inequalities_for_large_sboxes.html | 33 +- ...qualities_for_large_sboxes_xor_linear.html | 23 +- ...ties_for_sboxes_with_undisturbed_bits.html | 33 +- ...ontains_inequalities_for_small_sboxes.html | 33 +- ...qualities_for_small_sboxes_xor_linear.html | 33 +- ...lities_for_and_operation_2_input_bits.html | 33 +- ...enerate_inequalities_for_large_sboxes.html | 33 +- ...s_for_wordwise_truncated_mds_matrices.html | 33 +- ...dwise_truncated_xor_with_n_input_bits.html | 33 +- ...nequalities_for_xor_with_n_input_bits.html | 33 +- ...te_sbox_inequalities_for_trail_search.html | 33 +- ...isturbed_bits_inequalities_for_sboxes.html | 33 +- .../models/milp/utils/milp_name_mappings.html | 33 +- .../milp/utils/milp_truncated_utils.html | 236 + .../models/milp/utils/mzn_predicates.html | 33 +- .../models/milp/utils/utils.html | 38 +- .../models/minizinc/minizinc_model.html | 23 +- .../minizinc_boomerang_model.html | 221 + .../minizinc_cipher_model.html | 35 +- ...stic_truncated_xor_differential_model.html | 35 +- .../minizinc_xor_differential_model.html | 47 +- .../minizinc/utils/mzn_bct_predicates.html | 226 + .../models/minizinc/utils/utils.html | 231 + ...stic_truncated_xor_differential_model.html | 509 + .../sat/cms_models/cms_cipher_model.html | 45 +- .../cms_xor_differential_model.html | 244 +- .../sat/cms_models/cms_xor_linear_model.html | 169 +- .../cipher_modules/models/sat/sat_model.html | 102 +- ...stic_truncated_xor_differential_model.html | 479 + .../sat/sat_models/sat_cipher_model.html | 43 +- .../sat_xor_differential_model.html | 232 +- .../sat/sat_models/sat_xor_linear_model.html | 157 +- .../cipher_modules/models/sat/solvers.html | 243 + .../models/sat/utils/mzn_predicates.html | 23 +- .../sat/utils/n_window_heuristic_helper.html | 53 +- .../models/sat/utils/utils.html | 158 +- .../cipher_modules/models/smt/smt_model.html | 75 +- .../smt/smt_models/smt_cipher_model.html | 45 +- ...stic_truncated_xor_differential_model.html | 41 +- 
.../smt_xor_differential_model.html | 207 +- .../smt/smt_models/smt_xor_linear_model.html | 166 +- .../cipher_modules/models/smt/solvers.html | 239 + .../models/smt/utils/utils.html | 33 +- .../html/cipher_modules/models/utils.html | 35 +- .../cipher_modules/neural_network_tests.html | 33 +- docs/build/html/cipher_modules/report.html | 274 + .../statistical_tests/dataset_generator.html | 35 +- .../dieharder_statistical_tests.html | 423 +- .../statistical_tests/input_data_example.html | 33 +- .../nist_statistical_tests.html | 419 +- docs/build/html/cipher_modules/tester.html | 33 +- .../block_ciphers/aes_block_cipher.html | 601 +- .../block_ciphers/bea1_block_cipher.html | 595 +- .../block_ciphers/constant_block_cipher.html | 599 +- .../block_ciphers/des_block_cipher.html | 595 +- .../des_exact_key_length_block_cipher.html | 595 +- .../block_ciphers/fancy_block_cipher.html | 595 +- .../block_ciphers/hight_block_cipher.html | 589 +- .../block_ciphers/identity_block_cipher.html | 595 +- .../block_ciphers/kasumi_block_cipher.html | 613 +- .../block_ciphers/lblock_block_cipher.html | 595 +- .../block_ciphers/lea_block_cipher.html | 595 +- .../block_ciphers/lowmc_block_cipher.html | 585 +- .../lowmc_generate_matrices.html | 33 +- .../block_ciphers/midori_block_cipher.html | 595 +- .../block_ciphers/present_block_cipher.html | 595 +- .../block_ciphers/qarmav2_block_cipher.html | 649 +- .../qarmav2_with_mixcolumn_block_cipher.html | 1381 ++ .../block_ciphers/raiden_block_cipher.html | 595 +- .../block_ciphers/rc5_block_cipher.html | 595 +- .../block_ciphers/scarf_block_cipher.html | 1349 ++ .../block_ciphers/simon_block_cipher.html | 595 +- .../block_ciphers/skinny_block_cipher.html | 595 +- .../block_ciphers/sparx_block_cipher.html | 595 +- .../block_ciphers/speck_block_cipher.html | 595 +- .../block_ciphers/speedy_block_cipher.html | 1317 ++ .../block_ciphers/tea_block_cipher.html | 595 +- .../block_ciphers/threefish_block_cipher.html | 595 +- .../block_ciphers/twofish_block_cipher.html | 595 +- .../block_ciphers/xtea_block_cipher.html | 595 +- .../hash_functions/blake2_hash_function.html | 595 +- .../hash_functions/blake_hash_function.html | 595 +- .../hash_functions/md5_hash_function.html | 595 +- .../hash_functions/sha1_hash_function.html | 595 +- .../hash_functions/sha2_hash_function.html | 595 +- .../whirlpool_hash_function.html | 601 +- .../permutations/ascon_permutation.html | 595 +- ...scon_sbox_sigma_no_matrix_permutation.html | 595 +- .../ascon_sbox_sigma_permutation.html | 595 +- .../permutations/chacha_permutation.html | 589 +- .../permutations/gaston_permutation.html | 1357 ++ .../permutations/gaston_sbox_permutation.html | 1358 ++ .../permutations/gift_permutation.html | 595 +- .../permutations/gift_sbox_permutation.html | 595 +- .../permutations/gimli_permutation.html | 595 +- .../permutations/gimli_sbox_permutation.html | 595 +- .../permutations/grain_core_permutation.html | 595 +- .../keccak_invertible_permutation.html | 595 +- .../permutations/keccak_permutation.html | 595 +- .../permutations/keccak_sbox_permutation.html | 585 +- .../permutations/photon_permutation.html | 585 +- .../permutations/salsa_permutation.html | 599 +- .../permutations/sparkle_permutation.html | 595 +- .../spongent_pi_fsr_permutation.html | 595 +- .../permutations/spongent_pi_permutation.html | 595 +- ...pongent_pi_precomputation_permutation.html | 595 +- .../tinyjambu_32bits_word_permutation.html | 595 +- ...tinyjambu_fsr_32bits_word_permutation.html | 595 +- .../permutations/tinyjambu_permutation.html 
| 595 +- .../build/html/ciphers/permutations/util.html | 38 +- .../xoodoo_invertible_permutation.html | 595 +- .../permutations/xoodoo_permutation.html | 595 +- .../permutations/xoodoo_sbox_permutation.html | 585 +- .../stream_ciphers/a5_1_stream_cipher.html | 595 +- .../stream_ciphers/a5_2_stream_cipher.html | 1324 ++ .../stream_ciphers/bivium_stream_cipher.html | 595 +- .../bluetooth_stream_cipher_e0.html | 595 +- .../stream_ciphers/chacha_stream_cipher.html | 595 +- .../stream_ciphers/snow3g_stream_cipher.html | 599 +- .../stream_ciphers/trivium_stream_cipher.html | 585 +- .../stream_ciphers/zuc_stream_cipher.html | 585 +- docs/build/html/ciphers/toys/toyspn1.html | 585 +- docs/build/html/ciphers/toys/toyspn2.html | 585 +- docs/build/html/component.html | 33 +- docs/build/html/components/and_component.html | 58 +- .../components/cipher_output_component.html | 69 +- .../components/concatenate_component.html | 25 +- .../html/components/constant_component.html | 69 +- docs/build/html/components/fsr_component.html | 35 +- .../intermediate_output_component.html | 69 +- .../components/linear_layer_component.html | 59 +- .../html/components/mix_column_component.html | 59 +- .../html/components/modadd_component.html | 73 +- .../html/components/modsub_component.html | 102 +- .../html/components/modular_component.html | 78 +- ...non_linear_logical_operator_component.html | 68 +- docs/build/html/components/not_component.html | 64 +- docs/build/html/components/or_component.html | 127 +- .../components/permutation_component.html | 59 +- .../html/components/reverse_component.html | 59 +- .../html/components/rotate_component.html | 69 +- .../build/html/components/sbox_component.html | 96 +- .../html/components/shift_component.html | 59 +- .../html/components/shift_rows_component.html | 69 +- .../html/components/sigma_component.html | 59 +- .../components/theta_keccak_component.html | 59 +- .../components/theta_xoodoo_component.html | 59 +- .../components/variable_rotate_component.html | 35 +- .../components/variable_shift_component.html | 35 +- .../word_permutation_component.html | 59 +- docs/build/html/components/xor_component.html | 54 +- .../compound_xor_differential_cipher.html | 33 +- docs/build/html/editor.html | 33 +- docs/build/html/genindex-A.html | 811 +- docs/build/html/genindex-B.html | 35 +- docs/build/html/genindex-C.html | 1644 +-- docs/build/html/genindex-D.html | 155 +- docs/build/html/genindex-E.html | 73 +- docs/build/html/genindex-F.html | 243 +- docs/build/html/genindex-G.html | 541 +- docs/build/html/genindex-H.html | 11 +- docs/build/html/genindex-I.html | 179 +- docs/build/html/genindex-K.html | 21 +- docs/build/html/genindex-L.html | 19 +- docs/build/html/genindex-M.html | 99 +- docs/build/html/genindex-N.html | 289 +- docs/build/html/genindex-O.html | 37 +- docs/build/html/genindex-P.html | 261 +- docs/build/html/genindex-Q.html | 15 +- docs/build/html/genindex-R.html | 263 +- docs/build/html/genindex-S.html | 231 +- docs/build/html/genindex-T.html | 325 +- docs/build/html/genindex-U.html | 169 +- docs/build/html/genindex-V.html | 13 +- docs/build/html/genindex-W.html | 49 +- docs/build/html/genindex-X.html | 13 +- docs/build/html/genindex-Y.html | 11 +- docs/build/html/genindex-Z.html | 23 +- docs/build/html/genindex-all.html | 6148 ++++----- docs/build/html/genindex.html | 11 +- docs/build/html/index.html | 536 +- docs/build/html/input.html | 33 +- docs/build/html/objects.inv | Bin 62693 -> 61064 bytes docs/build/html/py-modindex.html | 98 +- docs/build/html/references.html 
| 33 +- docs/build/html/round.html | 38 +- docs/build/html/rounds.html | 33 +- docs/build/html/search.html | 9 +- docs/build/html/searchindex.js | 1 + docs/build/html/utils/integer.html | 33 +- docs/build/html/utils/integer_functions.html | 23 +- docs/build/html/utils/sage_scripts.html | 35 +- .../build/html/utils/sequence_operations.html | 33 +- docs/build/html/utils/templates.html | 39 +- docs/build/html/utils/utils.html | 47 +- 270 files changed, 37316 insertions(+), 48163 deletions(-) create mode 100644 docs/build/html/_sources/cipher_modules/continuous_diffusion_analysis.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/cp/minizinc_utils/usefulfunctions.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/cp/solvers.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/milp/solvers.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/milp/utils/milp_truncated_utils.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/minizinc/utils/mzn_bct_predicates.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/minizinc/utils/utils.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/sat/solvers.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/models/smt/solvers.rst.txt create mode 100644 docs/build/html/_sources/cipher_modules/report.rst.txt create mode 100644 docs/build/html/_sources/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.rst.txt create mode 100644 docs/build/html/_sources/ciphers/block_ciphers/scarf_block_cipher.rst.txt create mode 100644 docs/build/html/_sources/ciphers/block_ciphers/speedy_block_cipher.rst.txt create mode 100644 docs/build/html/_sources/ciphers/permutations/gaston_permutation.rst.txt create mode 100644 docs/build/html/_sources/ciphers/permutations/gaston_sbox_permutation.rst.txt create mode 100644 docs/build/html/_sources/ciphers/stream_ciphers/a5_2_stream_cipher.rst.txt create mode 100644 docs/build/html/_static/_sphinx_javascript_frameworks_compat.js create mode 100644 docs/build/html/_static/jquery-3.6.0.js create mode 100644 docs/build/html/cipher_modules/continuous_diffusion_analysis.html create mode 100644 docs/build/html/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.html create mode 100644 docs/build/html/cipher_modules/models/cp/minizinc_utils/usefulfunctions.html create mode 100644 docs/build/html/cipher_modules/models/cp/solvers.html create mode 100644 docs/build/html/cipher_modules/models/milp/solvers.html create mode 100644 docs/build/html/cipher_modules/models/milp/utils/milp_truncated_utils.html create mode 100644 docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.html create mode 100644 docs/build/html/cipher_modules/models/minizinc/utils/mzn_bct_predicates.html create mode 100644 docs/build/html/cipher_modules/models/minizinc/utils/utils.html create mode 100644 
docs/build/html/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.html create mode 100644 docs/build/html/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.html create mode 100644 docs/build/html/cipher_modules/models/sat/solvers.html create mode 100644 docs/build/html/cipher_modules/models/smt/solvers.html create mode 100644 docs/build/html/cipher_modules/report.html create mode 100644 docs/build/html/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.html create mode 100644 docs/build/html/ciphers/block_ciphers/scarf_block_cipher.html create mode 100644 docs/build/html/ciphers/block_ciphers/speedy_block_cipher.html create mode 100644 docs/build/html/ciphers/permutations/gaston_permutation.html create mode 100644 docs/build/html/ciphers/permutations/gaston_sbox_permutation.html create mode 100644 docs/build/html/ciphers/stream_ciphers/a5_2_stream_cipher.html create mode 100644 docs/build/html/searchindex.js diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo index d7e5bc30..86ccafe4 100644 --- a/docs/build/html/.buildinfo +++ b/docs/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 08a53bd5d60dd2e28390fe56d9e51a12 +config: e00818db5025b84c485df36992eb573f tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/build/html/_sources/cipher_modules/continuous_diffusion_analysis.rst.txt b/docs/build/html/_sources/cipher_modules/continuous_diffusion_analysis.rst.txt new file mode 100644 index 00000000..e74a2616 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/continuous_diffusion_analysis.rst.txt @@ -0,0 +1,10 @@ +----------------------------- +Continuous diffusion analysis +----------------------------- + +.. automodule:: cipher_modules.continuous_diffusion_analysis + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.rst.txt b/docs/build/html/_sources/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.rst.txt new file mode 100644 index 00000000..7a4923a0 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.rst.txt @@ -0,0 +1,10 @@ +------------------------------------ +Cp impossible xor differential model +------------------------------------ + +.. automodule:: cipher_modules.models.cp.cp_models.cp_impossible_xor_differential_model + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/cp/minizinc_utils/usefulfunctions.rst.txt b/docs/build/html/_sources/cipher_modules/models/cp/minizinc_utils/usefulfunctions.rst.txt new file mode 100644 index 00000000..d52da7a4 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/cp/minizinc_utils/usefulfunctions.rst.txt @@ -0,0 +1,10 @@ +--------------- +Usefulfunctions +--------------- + +.. 
automodule:: cipher_modules.models.cp.minizinc_utils.usefulfunctions + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/cp/solvers.rst.txt b/docs/build/html/_sources/cipher_modules/models/cp/solvers.rst.txt new file mode 100644 index 00000000..8bd94998 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/cp/solvers.rst.txt @@ -0,0 +1,10 @@ +------- +Solvers +------- + +.. automodule:: cipher_modules.models.cp.solvers + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/milp/solvers.rst.txt b/docs/build/html/_sources/cipher_modules/models/milp/solvers.rst.txt new file mode 100644 index 00000000..d39ce242 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/milp/solvers.rst.txt @@ -0,0 +1,10 @@ +------- +Solvers +------- + +.. automodule:: cipher_modules.models.milp.solvers + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/milp/utils/milp_truncated_utils.rst.txt b/docs/build/html/_sources/cipher_modules/models/milp/utils/milp_truncated_utils.rst.txt new file mode 100644 index 00000000..9caf608f --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/milp/utils/milp_truncated_utils.rst.txt @@ -0,0 +1,10 @@ +-------------------- +Milp truncated utils +-------------------- + +.. automodule:: cipher_modules.models.milp.utils.milp_truncated_utils + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.rst.txt b/docs/build/html/_sources/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.rst.txt new file mode 100644 index 00000000..781f7983 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.rst.txt @@ -0,0 +1,10 @@ +------------------------ +Minizinc boomerang model +------------------------ + +.. automodule:: cipher_modules.models.minizinc.minizinc_models.minizinc_boomerang_model + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/minizinc/utils/mzn_bct_predicates.rst.txt b/docs/build/html/_sources/cipher_modules/models/minizinc/utils/mzn_bct_predicates.rst.txt new file mode 100644 index 00000000..92d12995 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/minizinc/utils/mzn_bct_predicates.rst.txt @@ -0,0 +1,10 @@ +------------------ +Mzn bct predicates +------------------ + +.. automodule:: cipher_modules.models.minizinc.utils.mzn_bct_predicates + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/minizinc/utils/utils.rst.txt b/docs/build/html/_sources/cipher_modules/models/minizinc/utils/utils.rst.txt new file mode 100644 index 00000000..3c0c8325 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/minizinc/utils/utils.rst.txt @@ -0,0 +1,10 @@ +----- +Utils +----- + +.. 
automodule:: cipher_modules.models.minizinc.utils.utils + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.rst.txt b/docs/build/html/_sources/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.rst.txt new file mode 100644 index 00000000..47dde950 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.rst.txt @@ -0,0 +1,10 @@ +---------------------------------------------------------- +Cms bitwise deterministic truncated xor differential model +---------------------------------------------------------- + +.. automodule:: cipher_modules.models.sat.cms_models.cms_bitwise_deterministic_truncated_xor_differential_model + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.rst.txt b/docs/build/html/_sources/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.rst.txt new file mode 100644 index 00000000..167d25d7 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.rst.txt @@ -0,0 +1,10 @@ +---------------------------------------------------------- +Sat bitwise deterministic truncated xor differential model +---------------------------------------------------------- + +.. automodule:: cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/sat/solvers.rst.txt b/docs/build/html/_sources/cipher_modules/models/sat/solvers.rst.txt new file mode 100644 index 00000000..67a9c77e --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/sat/solvers.rst.txt @@ -0,0 +1,10 @@ +------- +Solvers +------- + +.. automodule:: cipher_modules.models.sat.solvers + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/models/smt/solvers.rst.txt b/docs/build/html/_sources/cipher_modules/models/smt/solvers.rst.txt new file mode 100644 index 00000000..88edd6ed --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/models/smt/solvers.rst.txt @@ -0,0 +1,10 @@ +------- +Solvers +------- + +.. automodule:: cipher_modules.models.smt.solvers + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/cipher_modules/report.rst.txt b/docs/build/html/_sources/cipher_modules/report.rst.txt new file mode 100644 index 00000000..74218769 --- /dev/null +++ b/docs/build/html/_sources/cipher_modules/report.rst.txt @@ -0,0 +1,10 @@ +------ +Report +------ + +.. 
automodule:: cipher_modules.report + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.rst.txt b/docs/build/html/_sources/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.rst.txt new file mode 100644 index 00000000..abdbccbd --- /dev/null +++ b/docs/build/html/_sources/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.rst.txt @@ -0,0 +1,10 @@ +----------------------------------- +Qarmav2 with mixcolumn block cipher +----------------------------------- + +.. automodule:: ciphers.block_ciphers.qarmav2_with_mixcolumn_block_cipher + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/block_ciphers/scarf_block_cipher.rst.txt b/docs/build/html/_sources/ciphers/block_ciphers/scarf_block_cipher.rst.txt new file mode 100644 index 00000000..8d4685c4 --- /dev/null +++ b/docs/build/html/_sources/ciphers/block_ciphers/scarf_block_cipher.rst.txt @@ -0,0 +1,10 @@ +------------------ +Scarf block cipher +------------------ + +.. automodule:: ciphers.block_ciphers.scarf_block_cipher + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/block_ciphers/speedy_block_cipher.rst.txt b/docs/build/html/_sources/ciphers/block_ciphers/speedy_block_cipher.rst.txt new file mode 100644 index 00000000..2cad56e5 --- /dev/null +++ b/docs/build/html/_sources/ciphers/block_ciphers/speedy_block_cipher.rst.txt @@ -0,0 +1,10 @@ +------------------- +Speedy block cipher +------------------- + +.. automodule:: ciphers.block_ciphers.speedy_block_cipher + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/permutations/gaston_permutation.rst.txt b/docs/build/html/_sources/ciphers/permutations/gaston_permutation.rst.txt new file mode 100644 index 00000000..5da3db9e --- /dev/null +++ b/docs/build/html/_sources/ciphers/permutations/gaston_permutation.rst.txt @@ -0,0 +1,10 @@ +------------------ +Gaston permutation +------------------ + +.. automodule:: ciphers.permutations.gaston_permutation + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/permutations/gaston_sbox_permutation.rst.txt b/docs/build/html/_sources/ciphers/permutations/gaston_sbox_permutation.rst.txt new file mode 100644 index 00000000..05865a1a --- /dev/null +++ b/docs/build/html/_sources/ciphers/permutations/gaston_sbox_permutation.rst.txt @@ -0,0 +1,10 @@ +----------------------- +Gaston sbox permutation +----------------------- + +.. automodule:: ciphers.permutations.gaston_sbox_permutation + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/ciphers/stream_ciphers/a5_2_stream_cipher.rst.txt b/docs/build/html/_sources/ciphers/stream_ciphers/a5_2_stream_cipher.rst.txt new file mode 100644 index 00000000..646f7214 --- /dev/null +++ b/docs/build/html/_sources/ciphers/stream_ciphers/a5_2_stream_cipher.rst.txt @@ -0,0 +1,10 @@ +------------------ +A5 2 stream cipher +------------------ + +.. 
automodule:: ciphers.stream_ciphers.a5_2_stream_cipher + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + diff --git a/docs/build/html/_sources/index.rst.txt b/docs/build/html/_sources/index.rst.txt index 3d8391eb..983ef4d3 100644 --- a/docs/build/html/_sources/index.rst.txt +++ b/docs/build/html/_sources/index.rst.txt @@ -13,37 +13,81 @@ CLAASP following SageMath guidelines. .. toctree:: - compound_xor_differential_cipher editor + compound_xor_differential_cipher + input cipher - component rounds + component round - input + +Components +========== + +.. toctree:: + + components/reverse_component + components/rotate_component + components/sbox_component + components/and_component + components/shift_component + components/multi_input_non_linear_logical_operator_component + components/not_component + components/mix_column_component + components/intermediate_output_component + components/cipher_output_component + components/permutation_component + components/theta_keccak_component + components/theta_xoodoo_component + components/or_component + components/fsr_component + components/shift_rows_component + components/constant_component + components/modadd_component + components/xor_component + components/concatenate_component + components/variable_rotate_component + components/linear_layer_component + components/sigma_component + components/variable_shift_component + components/word_permutation_component + components/modsub_component + components/modular_component Cipher modules ============== .. toctree:: + cipher_modules/generic_functions_continuous_diffusion_analysis + cipher_modules/evaluator cipher_modules/generic_bit_based_c_functions - cipher_modules/generic_bit_based_c_functions - cipher_modules/component_analysis_tests - cipher_modules/generic_word_based_c_functions - cipher_modules/generic_functions_vectorized_bit + cipher_modules/generic_functions_vectorized_byte + cipher_modules/graph_generator + cipher_modules/tester cipher_modules/code_generator - cipher_modules/generic_functions_continuous_diffusion_analysis cipher_modules/generic_word_based_c_functions - cipher_modules/generic_functions_vectorized_byte - cipher_modules/avalanche_tests + cipher_modules/report cipher_modules/algebraic_tests - cipher_modules/neural_network_tests - cipher_modules/graph_generator - cipher_modules/inverse_cipher + cipher_modules/avalanche_tests cipher_modules/generic_functions - cipher_modules/tester - cipher_modules/evaluator - cipher_modules/continuous_tests + cipher_modules/continuous_diffusion_analysis + cipher_modules/generic_word_based_c_functions + cipher_modules/generic_bit_based_c_functions + cipher_modules/component_analysis_tests + cipher_modules/inverse_cipher + cipher_modules/neural_network_tests + cipher_modules/generic_functions_vectorized_bit + +Statistical tests +----------------- + +.. toctree:: + + cipher_modules/statistical_tests/input_data_example + cipher_modules/statistical_tests/dieharder_statistical_tests + cipher_modules/statistical_tests/dataset_generator + cipher_modules/statistical_tests/nist_statistical_tests Models ------ @@ -52,27 +96,40 @@ Models cipher_modules/models/utils -Minizinc -```````` +Cp +`` .. toctree:: - cipher_modules/models/minizinc/minizinc_model + cipher_modules/models/cp/cp_model + cipher_modules/models/cp/solvers -Minizinc models -''''''''''''''' +Minizinc utils +'''''''''''''' .. 
toctree:: - cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model - cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model - cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model + cipher_modules/models/cp/minizinc_utils/usefulfunctions + +Cp models +''''''''' + +.. toctree:: + + cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model + cipher_modules/models/cp/cp_models/cp_xor_linear_model + cipher_modules/models/cp/cp_models/cp_xor_differential_model + cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model + cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model + cipher_modules/models/cp/cp_models/cp_cipher_model + cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model Sat ``` .. toctree:: + cipher_modules/models/sat/solvers cipher_modules/models/sat/sat_model Sat models @@ -80,53 +137,29 @@ Sat models .. toctree:: - cipher_modules/models/sat/sat_models/sat_deterministic_truncated_xor_differential_model + cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model + cipher_modules/models/sat/sat_models/sat_cipher_model cipher_modules/models/sat/sat_models/sat_xor_linear_model cipher_modules/models/sat/sat_models/sat_xor_differential_model - cipher_modules/models/sat/sat_models/sat_cipher_model - -Cms models -'''''''''' - -.. toctree:: - - cipher_modules/models/sat/cms_models/cms_xor_linear_model - cipher_modules/models/sat/cms_models/cms_xor_differential_model - cipher_modules/models/sat/cms_models/cms_deterministic_truncated_xor_differential_model - cipher_modules/models/sat/cms_models/cms_cipher_model Utils ''''' .. toctree:: - cipher_modules/models/sat/utils/n_window_heuristic_helper cipher_modules/models/sat/utils/mzn_predicates cipher_modules/models/sat/utils/utils + cipher_modules/models/sat/utils/n_window_heuristic_helper -Smt -``` - -.. toctree:: - - cipher_modules/models/smt/smt_model - -Smt models +Cms models '''''''''' .. toctree:: - cipher_modules/models/smt/smt_models/smt_xor_differential_model - cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model - cipher_modules/models/smt/smt_models/smt_cipher_model - cipher_modules/models/smt/smt_models/smt_xor_linear_model - -Utils -''''' - -.. toctree:: - - cipher_modules/models/smt/utils/utils + cipher_modules/models/sat/cms_models/cms_xor_differential_model + cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model + cipher_modules/models/sat/cms_models/cms_cipher_model + cipher_modules/models/sat/cms_models/cms_xor_linear_model Milp ```` @@ -134,19 +167,20 @@ Milp .. toctree:: cipher_modules/models/milp/milp_model + cipher_modules/models/milp/solvers Milp models ''''''''''' .. 
toctree:: - cipher_modules/models/milp/milp_models/milp_xor_linear_model cipher_modules/models/milp/milp_models/milp_wordwise_deterministic_truncated_xor_differential_model - cipher_modules/models/milp/milp_models/milp_xor_differential_model - cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model - cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model cipher_modules/models/milp/milp_models/milp_wordwise_impossible_xor_differential_model cipher_modules/models/milp/milp_models/milp_cipher_model + cipher_modules/models/milp/milp_models/milp_xor_linear_model + cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model + cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model + cipher_modules/models/milp/milp_models/milp_xor_differential_model Tmp ''' @@ -160,165 +194,185 @@ Utils .. toctree:: - cipher_modules/models/milp/utils/dictionary_containing_truncated_input_pattern_inequalities cipher_modules/models/milp/utils/milp_name_mappings - cipher_modules/models/milp/utils/generate_inequalities_for_large_sboxes - cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes - cipher_modules/models/milp/utils/generate_inequalities_for_xor_with_n_input_bits - cipher_modules/models/milp/utils/dictionary_containing_xor_inequalities_between_n_input_bits - cipher_modules/models/milp/utils/generate_inequalities_for_and_operation_2_input_bits - cipher_modules/models/milp/utils/generate_sbox_inequalities_for_trail_search cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_sboxes_with_undisturbed_bits - cipher_modules/models/milp/utils/dictionary_containing_truncated_xor_inequalities_between_n_input_bits - cipher_modules/models/milp/utils/generate_undisturbed_bits_inequalities_for_sboxes + cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_xor_with_n_input_bits + cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes cipher_modules/models/milp/utils/mzn_predicates - cipher_modules/models/milp/utils/utils + cipher_modules/models/milp/utils/generate_inequalities_for_xor_with_n_input_bits cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes_xor_linear cipher_modules/models/milp/utils/dictionary_containing_truncated_mds_inequalities - cipher_modules/models/milp/utils/config - cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_xor_with_n_input_bits - cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes_xor_linear + cipher_modules/models/milp/utils/generate_inequalities_for_and_operation_2_input_bits + cipher_modules/models/milp/utils/dictionary_containing_xor_inequalities_between_n_input_bits cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_mds_matrices + cipher_modules/models/milp/utils/generate_undisturbed_bits_inequalities_for_sboxes + cipher_modules/models/milp/utils/milp_truncated_utils + cipher_modules/models/milp/utils/generate_inequalities_for_large_sboxes + cipher_modules/models/milp/utils/dictionary_containing_truncated_input_pattern_inequalities + cipher_modules/models/milp/utils/utils cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes + cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes_xor_linear + 
cipher_modules/models/milp/utils/dictionary_containing_truncated_xor_inequalities_between_n_input_bits + cipher_modules/models/milp/utils/generate_sbox_inequalities_for_trail_search -Cp -`` +Smt +``` .. toctree:: - cipher_modules/models/cp/cp_model + cipher_modules/models/smt/smt_model + cipher_modules/models/smt/solvers -Minizinc functions -'''''''''''''''''' +Smt models +'''''''''' .. toctree:: - cipher_modules/models/cp/Minizinc_functions/Usefulfunctions + cipher_modules/models/smt/smt_models/smt_xor_linear_model + cipher_modules/models/smt/smt_models/smt_cipher_model + cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model + cipher_modules/models/smt/smt_models/smt_xor_differential_model -Cp models -''''''''' +Utils +''''' .. toctree:: - cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model - cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model - cipher_modules/models/cp/cp_models/cp_cipher_model - cipher_modules/models/cp/cp_models/cp_xor_differential_model - cipher_modules/models/cp/cp_models/cp_xor_linear_model - cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model + cipher_modules/models/smt/utils/utils Algebraic ````````` .. toctree:: - cipher_modules/models/algebraic/constraints cipher_modules/models/algebraic/algebraic_model + cipher_modules/models/algebraic/constraints cipher_modules/models/algebraic/boolean_polynomial_ring -Statistical tests ------------------ +Minizinc +```````` .. toctree:: - cipher_modules/statistical_tests/dataset_generator - cipher_modules/statistical_tests/dieharder_statistical_tests - cipher_modules/statistical_tests/input_data_example - cipher_modules/statistical_tests/nist_statistical_tests + cipher_modules/models/minizinc/minizinc_model -Ciphers -======= +Minizinc models +''''''''''''''' .. toctree:: + cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model + cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model + cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model + cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model -Block ciphers -------------- +Utils +''''' + +.. toctree:: + + cipher_modules/models/minizinc/utils/mzn_bct_predicates + cipher_modules/models/minizinc/utils/utils + +Ciphers +======= .. 
toctree:: - ciphers/block_ciphers/aes_block_cipher - ciphers/block_ciphers/des_block_cipher - ciphers/block_ciphers/bea1_block_cipher - ciphers/block_ciphers/lblock_block_cipher - ciphers/block_ciphers/speck_block_cipher - ciphers/block_ciphers/midori_block_cipher - ciphers/block_ciphers/fancy_block_cipher - ciphers/block_ciphers/lowmc_generate_matrices - ciphers/block_ciphers/des_exact_key_length_block_cipher - ciphers/block_ciphers/kasumi_block_cipher - ciphers/block_ciphers/raiden_block_cipher - ciphers/block_ciphers/constant_block_cipher - ciphers/block_ciphers/identity_block_cipher - ciphers/block_ciphers/simon_block_cipher - ciphers/block_ciphers/present_block_cipher - ciphers/block_ciphers/hight_block_cipher - ciphers/block_ciphers/lowmc_block_cipher - ciphers/block_ciphers/threefish_block_cipher - ciphers/block_ciphers/skinny_block_cipher - ciphers/block_ciphers/qarmav2_block_cipher - ciphers/block_ciphers/sparx_block_cipher - ciphers/block_ciphers/xtea_block_cipher - ciphers/block_ciphers/twofish_block_cipher - ciphers/block_ciphers/tea_block_cipher - ciphers/block_ciphers/lea_block_cipher - ciphers/block_ciphers/rc5_block_cipher Permutations ------------ .. toctree:: - ciphers/permutations/spongent_pi_permutation - ciphers/permutations/ascon_sbox_sigma_permutation - ciphers/permutations/salsa_permutation + ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation + ciphers/permutations/xoodoo_invertible_permutation ciphers/permutations/tinyjambu_32bits_word_permutation - ciphers/permutations/grain_core_permutation + ciphers/permutations/spongent_pi_fsr_permutation + ciphers/permutations/spongent_pi_precomputation_permutation + ciphers/permutations/gimli_permutation + ciphers/permutations/gift_permutation + ciphers/permutations/xoodoo_permutation + ciphers/permutations/sparkle_permutation + ciphers/permutations/gimli_sbox_permutation ciphers/permutations/xoodoo_sbox_permutation ciphers/permutations/chacha_permutation - ciphers/permutations/keccak_invertible_permutation + ciphers/permutations/spongent_pi_permutation + ciphers/permutations/grain_core_permutation + ciphers/permutations/salsa_permutation + ciphers/permutations/gaston_permutation + ciphers/permutations/ascon_sbox_sigma_permutation ciphers/permutations/keccak_permutation + ciphers/permutations/ascon_permutation + ciphers/permutations/keccak_invertible_permutation + ciphers/permutations/gift_sbox_permutation ciphers/permutations/util - ciphers/permutations/tinyjambu_permutation - ciphers/permutations/sparkle_permutation ciphers/permutations/tinyjambu_fsr_32bits_word_permutation + ciphers/permutations/gaston_sbox_permutation ciphers/permutations/keccak_sbox_permutation ciphers/permutations/photon_permutation - ciphers/permutations/spongent_pi_precomputation_permutation - ciphers/permutations/spongent_pi_fsr_permutation - ciphers/permutations/gimli_sbox_permutation - ciphers/permutations/xoodoo_permutation - ciphers/permutations/ascon_permutation - ciphers/permutations/gift_permutation - ciphers/permutations/xoodoo_invertible_permutation - ciphers/permutations/gimli_permutation - ciphers/permutations/gift_sbox_permutation - ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation + ciphers/permutations/tinyjambu_permutation + +Stream ciphers +-------------- + +.. 
toctree:: + + ciphers/stream_ciphers/zuc_stream_cipher + ciphers/stream_ciphers/trivium_stream_cipher + ciphers/stream_ciphers/chacha_stream_cipher + ciphers/stream_ciphers/bivium_stream_cipher + ciphers/stream_ciphers/snow3g_stream_cipher + ciphers/stream_ciphers/bluetooth_stream_cipher_e0 + ciphers/stream_ciphers/a5_1_stream_cipher + ciphers/stream_ciphers/a5_2_stream_cipher Hash functions -------------- .. toctree:: - ciphers/hash_functions/blake_hash_function ciphers/hash_functions/sha1_hash_function - ciphers/hash_functions/blake2_hash_function - ciphers/hash_functions/sha2_hash_function ciphers/hash_functions/whirlpool_hash_function + ciphers/hash_functions/blake_hash_function + ciphers/hash_functions/blake2_hash_function ciphers/hash_functions/md5_hash_function + ciphers/hash_functions/sha2_hash_function -Stream ciphers --------------- +Block ciphers +------------- .. toctree:: - ciphers/stream_ciphers/bluetooth_stream_cipher_e0 - ciphers/stream_ciphers/snow3g_stream_cipher - ciphers/stream_ciphers/chacha_stream_cipher - ciphers/stream_ciphers/a5_1_stream_cipher - ciphers/stream_ciphers/bivium_stream_cipher - ciphers/stream_ciphers/zuc_stream_cipher - ciphers/stream_ciphers/trivium_stream_cipher + ciphers/block_ciphers/sparx_block_cipher + ciphers/block_ciphers/constant_block_cipher + ciphers/block_ciphers/fancy_block_cipher + ciphers/block_ciphers/twofish_block_cipher + ciphers/block_ciphers/midori_block_cipher + ciphers/block_ciphers/lblock_block_cipher + ciphers/block_ciphers/bea1_block_cipher + ciphers/block_ciphers/present_block_cipher + ciphers/block_ciphers/speedy_block_cipher + ciphers/block_ciphers/scarf_block_cipher + ciphers/block_ciphers/speck_block_cipher + ciphers/block_ciphers/skinny_block_cipher + ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher + ciphers/block_ciphers/simon_block_cipher + ciphers/block_ciphers/lowmc_generate_matrices + ciphers/block_ciphers/des_block_cipher + ciphers/block_ciphers/qarmav2_block_cipher + ciphers/block_ciphers/kasumi_block_cipher + ciphers/block_ciphers/identity_block_cipher + ciphers/block_ciphers/hight_block_cipher + ciphers/block_ciphers/lowmc_block_cipher + ciphers/block_ciphers/des_exact_key_length_block_cipher + ciphers/block_ciphers/lea_block_cipher + ciphers/block_ciphers/aes_block_cipher + ciphers/block_ciphers/tea_block_cipher + ciphers/block_ciphers/threefish_block_cipher + ciphers/block_ciphers/xtea_block_cipher + ciphers/block_ciphers/raiden_block_cipher + ciphers/block_ciphers/rc5_block_cipher Toys ---- @@ -328,50 +382,17 @@ Toys ciphers/toys/toyspn2 ciphers/toys/toyspn1 -Components -========== - -.. 
toctree:: - - components/rotate_component - components/shift_rows_component - components/theta_keccak_component - components/reverse_component - components/linear_layer_component - components/intermediate_output_component - components/variable_shift_component - components/or_component - components/modadd_component - components/multi_input_non_linear_logical_operator_component - components/variable_rotate_component - components/not_component - components/sigma_component - components/xor_component - components/concatenate_component - components/permutation_component - components/sbox_component - components/modsub_component - components/theta_xoodoo_component - components/fsr_component - components/modular_component - components/and_component - components/shift_component - components/constant_component - components/mix_column_component - components/word_permutation_component - components/cipher_output_component - Utils ===== .. toctree:: - utils/templates utils/integer + utils/sequence_operations utils/integer_functions utils/utils - utils/sequence_operations utils/sage_scripts + utils/templates diff --git a/docs/build/html/_sources/references.rst.txt b/docs/build/html/_sources/references.rst.txt index a9f81a60..bc05f687 100644 --- a/docs/build/html/_sources/references.rst.txt +++ b/docs/build/html/_sources/references.rst.txt @@ -79,6 +79,19 @@ \Bos, J.W., Ducas, L., Kiltz, E., Lepoint, T., Lyubashevsky, V., Schanck, J.M., Schwabe, P., Seiler, G., Stehlé, D.: CRYSTALS-Kyber: A CCA-Secure Module-Lattice-Based KEM. EuroS&P 2018: 353-367. +.. [BHPR2021] + Bellini E., Hambitzer A., Protopapa M., Rossi M. : *Limitations + of the Use of Neural Networks in Black Box Cryptanalysis* : + In Innovative Security Solutions for Information Technology + and Communications: 14th International Conference, SecITC 2021, + Virtual Event, November 25–26, 2021, Revised Selected Papers. + Springer-Verlag, Berlin, Heidelberg, 100–124 + +.. [BR2021] + Bellini, E., Rossi, M. : *Performance Comparison Between Deep Learning-Based + and Conventional Cryptographic Distinguishers* : In: Arai, K. (eds) Intelligent Computing. + Lecture Notes in Networks and Systems, vol 285. Springer + .. [BKLPPRSV2007] Bogdanov A., Knudsen L., Leander G., Paar C., Poschmann A., Robshaw M., Seurin Y., Vikkelsoe C. : *PRESENT: An Ultra-Lightweight Block Cipher* @@ -304,6 +317,13 @@ **L** +.. [LMM+2021] + Leander G., Moos T., Moradi A., Rasoolzadeh S. (2021). *The SPEEDY + Family of Block Ciphers: Engineering an Ultra Low-Latency Cipher from + Gate Level for Secure Processor Architectures*. IACR Transactions on + Cryptographic Hardware and Embedded Systems, 2021(4), 510–545. + https://doi.org/10.46586/tches.v2021.i4.510-545 + .. [Lin1999] van Lint J. : *Introduction to coding theory* : 3rd ed. Springer-Verlag GTM, 86, 1999 diff --git a/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 00000000..8549469d --- /dev/null +++ b/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,134 @@ +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. 
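A minimal usage sketch of the two query-string helpers this shim restores, based on the definitions further down in this hunk (the URL fragment shown is hypothetical):

    jQuery.urldecode("xor%20differential+trail");   // -> "xor differential trail"
    jQuery.getQueryParameters("?q=aes&q=des");      // -> { q: ["aes", "des"] }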
+ * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css index bf18350b..7243282d 100644 --- a/docs/build/html/_static/basic.css +++ b/docs/build/html/_static/basic.css @@ -222,7 +222,7 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ div.body { - min-width: 450px; + min-width: 360px; max-width: 800px; } @@ -428,10 +428,6 @@ table.docutils td, table.docutils th { border-bottom: 1px solid #aaa; } -table.footnote td, table.footnote th { - border: 0 !important; -} - th { text-align: left; padding-right: 5px; @@ -615,6 +611,7 @@ ul.simple p { margin-bottom: 0; } +/* Docutils 0.17 and older (footnotes & citations) */ dl.footnote > dt, dl.citation > dt { float: left; @@ -632,6 +629,33 @@ dl.citation > dd:after { clear: both; } +/* Docutils 0.18+ (footnotes & citations) */ +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +/* Footnotes & citations ends */ + dl.field-list { display: grid; grid-template-columns: fit-content(30%) auto; diff --git a/docs/build/html/_static/classic.css b/docs/build/html/_static/classic.css index 36035009..4153fd8f 100644 --- a/docs/build/html/_static/classic.css +++ b/docs/build/html/_static/classic.css @@ -28,6 +28,7 @@ body { } div.document { + display: flex; background-color: #D1D2F9; } diff --git a/docs/build/html/_static/doctools.js b/docs/build/html/_static/doctools.js index e1bfd708..c3db08d1 100644 --- a/docs/build/html/_static/doctools.js +++ b/docs/build/html/_static/doctools.js @@ -2,357 +2,263 @@ * doctools.js * ~~~~~~~~~~~ * - * Sphinx JavaScript utilities for all documentation. + * Base JavaScript utilities for all Sphinx HTML documentation. * * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
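The rewrite below drops the jQuery dependency entirely; as a minimal sketch, assuming the _ready helper introduced a few lines further down in this hunk, initialisation now reads:

    _ready(() => {
      Documentation.init();   // replaces the old $(document).ready(...) hook
    });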
* */ +"use strict"; -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - * - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL - */ -jQuery.urldecode = function(x) { - if (!x) { - return x +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); } - return decodeURIComponent(x.replace(/\+/g, ' ')); }; /** - * small helper function to urlencode strings + * highlight a given string on a node by wrapping it in + * span elements with the given class name. */ -jQuery.urlencode = encodeURIComponent; +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); } } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; }; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; /** * Small JavaScript module for the documentation. 
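A short usage sketch of the i18n helpers this module defines, assuming a catalog has been loaded through addTranslations() (the strings are illustrative):

    const _ = Documentation.gettext;
    _("Hide Search Matches");                             // falls back to the input string if untranslated
    Documentation.ngettext("%s match", "%s matches", 2);  // "%s matches" here; uses PLURAL_EXPR once a catalog is loaded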
*/ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - this.initOnKeyListeners(); +const Documentation = { + init: () => { + Documentation.highlightSearchWords(); + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); }, /** * i18n support */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", // gettext and ngettext don't access this so that the functions // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } }, - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; }, - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; }, /** * highlight the search words provided in the url in the text */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, + highlightSearchWords: () => { + const highlight = + new URLSearchParams(window.location.search).get("highlight") || ""; + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); }, /** * helper function to hide the search marks again */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - var url = new URL(window.location); - url.searchParams.delete('highlight'); - window.history.replaceState({}, '', url); + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + const url = new URL(window.location); + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); }, - /** + /** * helper function to focus on search bar */ - focusSearchBar : function() { - $('input[name=q]').first().focus(); + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); }, /** - * make the url absolute + * Initialise the domain index toggle buttons */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + const togglerElements = document.querySelectorAll("img.toggler"); + 
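      // Assumed markup, not defined in this file: the domain index renders
      //   <img class="toggler" id="toggle-3" src=".../minus.png"> ... <tr class="cg-3">...</tr>
      // so toggler(el) swaps the icon between minus.png and plus.png and shows or
      // hides every row whose class suffix matches the number after "toggle-".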
togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); }, - initOnKeyListeners: function() { + initOnKeyListeners: () => { // only install a listener if it is really needed - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && - !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) - return; + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; - $(document).keydown(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box, textarea, dropdown or button - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' - && activeElementType !== 'BUTTON') { - if (event.altKey || event.ctrlKey || event.metaKey) - return; + const blacklistedElements = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", + ]); + document.addEventListener("keydown", (event) => { + if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements + if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys - if (!event.shiftKey) { - switch (event.key) { - case 'ArrowLeft': - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) - break; - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - break; - case 'ArrowRight': - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) - break; - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - break; - case 'Escape': - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) - break; - Documentation.hideSearchWords(); - return false; - } - } - - // some keyboard layouts may need Shift to get / + if (!event.shiftKey) { switch (event.key) { - case '/': - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) - break; - Documentation.focusSearchBar(); - return false; + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + case "Escape": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.hideSearchWords(); + event.preventDefault(); } } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } }); - } + }, }; // quick alias for translations -_ = Documentation.gettext; +const _ = Documentation.gettext; -$(document).ready(function() { - Documentation.init(); -}); +_ready(Documentation.init); diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js index 807c4009..e8fc55b5 100644 --- a/docs/build/html/_static/documentation_options.js +++ b/docs/build/html/_static/documentation_options.js @@ -1,7 +1,7 @@ var DOCUMENTATION_OPTIONS = { URL_ROOT: 
document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '1.1.0', - LANGUAGE: 'None', + VERSION: 'v2.5.0', + LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', FILE_SUFFIX: '.html', @@ -10,5 +10,5 @@ var DOCUMENTATION_OPTIONS = { SOURCELINK_SUFFIX: '.txt', NAVIGATION_WITH_KEYS: false, SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: true, + ENABLE_SEARCH_SHORTCUTS: false, }; \ No newline at end of file diff --git a/docs/build/html/_static/jquery-3.6.0.js b/docs/build/html/_static/jquery-3.6.0.js new file mode 100644 index 00000000..fc6c299b --- /dev/null +++ b/docs/build/html/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). 
(gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
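	// Illustrative usage of the each method defined just below (any selector works):
	// the callback receives (index, element) and runs with `this` bound to the element.
	//   jQuery("span.highlighted").each(function(i, el) {
	//     console.log(i, el.textContent);
	//   });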
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( ( elem = results[ i++ ] ) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
+
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert( function( el ) {
+
+ var input;
+
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll( "[selected]" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push( "~=" );
+ }
+
+ // Support: IE 11+, Edge 15 - 18+
+ // IE 11/Edge don't find elements on a `[name='']` query in some cases.
+ // Adding a temporary attribute to the document before the selection works
+ // around the issue.
+ // Interestingly, IE 10 & older don't seem to have the issue.
+ input = document.createElement( "input" );
+ input.setAttribute( "name", "" );
+ el.appendChild( input );
+ if ( !el.querySelectorAll( "[name='']" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+ whitespace + "*(?:''|\"\")" );
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll( ":checked" ).length ) {
+ rbuggyQSA.push( ":checked" );
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push( ".#.+[+~]" );
+ }
+
+ // Support: Firefox <=3.6 - 5 only
+ // Old Firefox doesn't throw on a badly-escaped identifier.
+ el.querySelectorAll( "\\\f" );
+ rbuggyQSA.push( "[\\r\\n\\f]" );
+ } );
+
+ assert( function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement( "input" );
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+ // Enforce case-sensitivity of name attribute
+ if ( el.querySelectorAll( "[name=d]" ).length ) {
+ rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
+ }
+
+ // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+ // IE8 throws error here and will not see later tests
+ if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: IE9-11+
+ // IE's :disabled selector does not pick up the children of disabled fieldsets
+ docElem.appendChild( el ).disabled = true;
+ if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: Opera 10 - 11 only
+ // Opera 10-11 does not throw on post-comma invalid pseudos
+ el.querySelectorAll( "*,:x" );
+ rbuggyQSA.push( ",.*:" );
+ } );
+ }
+
+ if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
+ docElem.webkitMatchesSelector ||
+ docElem.mozMatchesSelector ||
+ docElem.oMatchesSelector ||
+ docElem.msMatchesSelector ) ) ) ) {
+
+ assert( function( el ) {
+
+ // Check to see if it's possible to do matchesSelector
+ // on a disconnected node (IE 9)
+ support.disconnectedMatch = matches.call( el, "*" );
+
+ // This should fail with an exception
+ // Gecko does not error, returns false instead
+ matches.call( el, "[s!='']:x" );
+ rbuggyMatches.push( "!=", pseudos );
+ } );
+ }
+
+ rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
+ rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
+
+ /* Contains
+ ---------------------------------------------------------------------- */
+ hasCompare = rnative.test( docElem.compareDocumentPosition );
+
+ // Element contains another
+ // Purposefully self-exclusive
+ // As in, an element does not contain itself
+ contains = hasCompare || rnative.test( docElem.contains ) ?
+ function( a, b ) {
+ var adown = a.nodeType === 9 ? a.documentElement : a,
+ bup = b && b.parentNode;
+ return a === bup || !!( bup && bup.nodeType === 1 && (
+ adown.contains ?
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
+		while ( i-- ) {
+			token = tokens[ i ];
+
+			// Abort if we hit a combinator
+			if ( Expr.relative[ ( type = token.type ) ] ) {
+				break;
+			}
+			if ( ( find = Expr.find[ type ] ) ) {
+
+				// Search, expanding context for leading sibling combinators
+				if ( ( seed = find(
+					token.matches[ 0 ].replace( runescape, funescape ),
+					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+						context
+				) ) ) {
+
+					// If seed is empty or no tokens remain, we can return early
+					tokens.splice( i, 1 );
+					selector = seed.length && toSelector( tokens );
+					if ( !selector ) {
+						push.apply( results, seed );
+						return results;
+					}
+
+					break;
+				}
+			}
+		}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	} );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+	addHandle( "value", function( elem, _name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	} );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+	return el.getAttribute( "disabled" ) == null;
+} ) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+					( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, _i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, _i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, _i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. + getProto( elem.contentDocument ) ) { + + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> tags with their contents when inserted outside of
+	// the select element.
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// Support: IE <=9 only
+if ( !support.option ) {
+	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
+}
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, attached, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( toType( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		attached = isAttached( elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( attached ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+var rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 - 11+
+// focus() and blur() are asynchronous, except when they are no-op.
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.get( src );
+		events = pdataOld.events;
+
+		if ( events ) {
+			dataPriv.remove( dest, "handle events" );
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2.
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + - - + + @@ -33,10 +34,10 @@

Navigation

modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Cipher

    +

    Cipher

    class Cipher(family_name, cipher_type, cipher_inputs, cipher_inputs_bit_size, cipher_output_bit_size, cipher_reference_code=None)
    @@ -206,94 +207,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -332,185 +250,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -543,53 +300,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -634,7 +344,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -648,11 +358,14 @@ 

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.
EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -713,28 +426,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -770,35 +461,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -817,50 +479,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria:
- “avalanche_dependence_vectors”
- “avalanche_dependence_uniform_vectors”
- “avalanche_entropy_vectors”
- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1126,43 +744,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1319,24 +900,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1471,38 +1034,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1579,70 +1110,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant
• verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1667,13 +1144,13 @@

    Navigation

    Previous topic

    -

    Editor

    +

    Input

    Next topic

    -

    Component

    +

    Rounds

    This Page

    @@ -1691,7 +1168,7 @@

    Quick search

    - +
    @@ -1706,10 +1183,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1717,7 +1194,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/algebraic_tests.html b/docs/build/html/cipher_modules/algebraic_tests.html index 0373b3f2..b1558137 100644 --- a/docs/build/html/cipher_modules/algebraic_tests.html +++ b/docs/build/html/cipher_modules/algebraic_tests.html @@ -1,23 +1,24 @@ - + - Algebraic tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Algebraic tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,12 +57,49 @@

    Navigation

    -

    Algebraic tests

    -
    -
    -algebraic_tests(cipher, timeout)
    +

    Algebraic tests

    +
    +
    +class AlgebraicTests(cipher)
    +

    Bases: object

    +

    Construct an instance of Algebraic Tests of the cipher.

    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.algebraic_tests import AlgebraicTests
    +sage: from claasp.ciphers.toys.toyspn1 import ToySPN1
    +sage: toyspn = ToySPN1(number_of_rounds=2)
    +sage: alg_test = AlgebraicTests(toyspn)
    +sage: alg_test.algebraic_tests(timeout_in_seconds=10)
    +{'input_parameters': {'cipher': toyspn1_p6_k6_o6_r2,
    +  'timeout_in_seconds': 10,
    +  'test_name': 'algebraic_tests'},
    + 'test_results': {'number_of_variables': [24, 42],
    +  'number_of_equations': [34, 74],
    +  'number_of_monomials': [54, 102],
    +  'max_degree_of_equations': [2, 2],
    +  'test_passed': [False, False]}}
    +
    +sage: from claasp.cipher_modules.algebraic_tests import AlgebraicTests
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=1)
    +sage: alg_test = AlgebraicTests(speck)
    +sage: alg_test.algebraic_tests(timeout_in_seconds=30)
    +{'input_parameters': {'cipher': speck_p32_k64_o32_r1,
    +  'timeout_in_seconds': 30,
    +  'test_name': 'algebraic_tests'},
    + 'test_results': {'number_of_variables': [112],
    +  'number_of_equations': [64],
    +  'number_of_monomials': [157],
    +  'max_degree_of_equations': [2],
    +  'test_passed': [True]}}
    +
    +
    +
    +
    +algebraic_tests(timeout_in_seconds=60)
    +
    +
    @@ -73,13 +111,13 @@

    Navigation

    Previous topic

    -

    Avalanche tests

    +

    Report

    This Page

    @@ -97,7 +135,7 @@

    Quick search

    - +
    @@ -112,10 +150,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -123,7 +161,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/avalanche_tests.html b/docs/build/html/cipher_modules/avalanche_tests.html index 07ebc641..d02d48ac 100644 --- a/docs/build/html/cipher_modules/avalanche_tests.html +++ b/docs/build/html/cipher_modules/avalanche_tests.html @@ -1,23 +1,24 @@ - + - Avalanche tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Avalanche tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,126 +57,159 @@

    Navigation

    -

    Avalanche tests

    -
    -
    -add_intermediate_output_components_id_to_dictionary(components)
    -
    - -
    -
    -add_intermediate_output_rounds_id_to_dictionary(cipher)
    -
    - -
    -
    -add_intermediate_output_values_to_dictionary(cipher, criterion_name, dict_intermediate_output_names, dict_parameters, dict_test_results, index, input_name, intermediate_output_name)
    -
    - -
    -
    -add_multicolumns_to_graph(avalanche_results, code, criterion, diff, input, intermediate_output, modulo, nb_occ, output_bit_size, step)
    -
    - -
    -
    -avalanche_probability_vectors(cipher, nb_samples)
    -
    - -
    -
    -avalanche_tests(cipher, number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -
    - -
    -
    -calculate_average_difference(all_output_vectors, criterion_name, dict_parameters, dict_test_results, input_name, intermediate_output_name)
    -
    - -
    -
    -calculate_regular_difference(criterion_name, dict_criterion, dict_intermediate_output_names, dict_parameters, dict_test_results, input_name, intermediate_output_name)
    -
    - -
    -
    -calculate_worst_input_differences(cipher, criterion_name, largest_round_criterion_not_satisfied, dict_test_results, input_name, intermediate_output_name)
    -
    - -
    -
    -compute_criterion_from_avalanche_probability_vectors(cipher, all_avalanche_probability_vectors, avalanche_dependence_uniform_bias)
    -
    - -
    -
    -generate_avalanche_probability_vectors(cipher, dict_intermediate_output_names, inputs, evaluated_inputs, input_diff, index_of_specific_input)
    -
    - -
    -
    -generate_graph_by_differences_positions(avalanche_results, code, criterion, difference_positions, input, intermediate_output, modulo, step)
    -
    - -
    -
    -generate_heatmap_graphs_for_avalanche_tests(cipher, avalanche_results, difference_positions=None, criterion_names=None)
    -
    - -
    -
    -generate_inputs_prime(cipher, index_of_specific_input, input_diff, inputs)
    -
    - -
    -
    -generate_random_inputs(cipher, nb_samples)
    -
    - -
    -
    -get_average_criteria_by_round_input_output(diffusion_tests_results, round_i, input_tag, output_tag)
    -
    - -
    -
    -get_average_criteria_list_by_output_tag(diffusion_tests_results, output_tag)
    -
    - -
    -
    -get_intermediate_output_names(cipher)
    -
    - -
    -
    -init_dictionary_test_results(cipher, dict_intermediate_output_names)
    -
    - -
    -
    -is_output(component)
    -
    - -
    -
    -set_vector_dependence(criterion, input_diff, input_tag, number_of_occurrence, output_tag, vector)
    -
    - -
    -
    -set_vector_dependence_uniform(avalanche_dependence_uniform_bias, criterion, input_diff, input_tag, number_of_occurrence, output_tag, vector)
    -
    - -
    -
    -set_vector_entropy(criterion, input_diff, input_tag, number_of_occurrence, output_tag, vector)
    -
    +

    Avalanche tests

    +
    +
    +class AvalancheTests(cipher)
    +

    Bases: object

    +
    +
    +avalanche_probability_vectors(nb_samples)
    +

    Return the avalanche probability vectors of each input bit difference for each round.

    +

    The inputs considered are plaintext, key, etc.

    +

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    +
    +

    Note

    +

apvs[“key”][“round_output”][i][j]
The vector returned corresponds to the probability of flipping of each output bit after j+1 rounds when the difference is injected in position i in the key.

    +
    +

    INPUT:

    +
      +
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    +sage: from claasp.cipher_modules.avalanche_tests import AvalancheTests
    +sage: test = AvalancheTests(speck)
    +sage: apvs = test.avalanche_probability_vectors(100)
    +sage: apvs["plaintext"]["round_output"][0][3] # random
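+sage: # Hypothetical follow-up (not part of the library documentation): each entry of apvs
+sage: # is a vector with one flip probability per output bit, so its length is expected to
+sage: # equal the block size of this Speck instance (16).
+sage: len(apvs["plaintext"]["round_output"][0][3])   # expected: 16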
    +
    +
    +
    + +
    +
    +avalanche_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    +

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    +

    INPUT:

    +
      +
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • +
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability +of flipping should be

    • +
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied +for a given input bit difference if for all output bits of the round under analysis, the corresponding +avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • +
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is +satisfied for a given input bit difference if for all output bits of the round under analysis, the +corresponding avalanche dependence uniform criterion d is such that +block_bit_size - bias <= d <= block_bit_size + bias

    • +
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is +satisfied for a given input bit difference if for all output bits of the round under analysis, the +corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • +
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is +satisfied for a given input bit difference if for all output bits of the round under analysis, the +corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • +
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results +to the output dictionary

    • +
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence +uniform results to the output dictionary

    • +
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the +output dictionary

    • +
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the +output dictionary

    • +
    +
    +

    Note

    +

d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][i][“vectors”][j]
The vector returned by this command corresponds to the avalanche entropy after j+1 rounds, when an input difference has been injected in position i in the plaintext.

    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    +sage: from claasp.cipher_modules.avalanche_tests import AvalancheTests
    +sage: test = AvalancheTests(speck)
    +sage: d = test.avalanche_tests(number_of_samples=100)
    +sage: d["test_results"]["key"]["round_output"]["avalanche_dependence_vectors"][0]["vectors"][1] # random
    +
    +
    +
    + +
    +
    +compute_criterion_from_avalanche_probability_vectors(all_avalanche_probability_vectors, avalanche_dependence_uniform_bias)
    +

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    +

    ALGORITHM:

    +

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    +

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    +

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit +difference, for a given round. +If the avalanche weights of all the input bit differences for a certain round is close to half of +the output bit size with respect to a certain threshold, we say that the cipher satisfies the +avalanche criterion for this round.

    +

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input +bit difference, for a given round. +If the strict avalanche entropy of all the input bit differences for a certain round is close to +the output bit size with respect to a certain threshold, we say that the cipher satisfies the +strict avalanche criterion for this round.
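To make these four definitions concrete, here is a small illustrative sketch (not taken from the library: the vector p and the 0.05 bias are made-up values) showing how each criterion quantity could be derived from a single avalanche probability vector:
sage: p = [0.50, 0.48, 0.00, 1.00]                        # hypothetical flip probabilities for a 4-bit output
sage: sum(1 for x in p if x > 0)                          # avalanche dependence: output bits that flip at all
3
sage: bias = 0.05
sage: sum(1 for x in p if 0.5 - bias <= x <= 0.5 + bias)  # avalanche dependence uniform
2
sage: weight = sum(p)                                     # avalanche weight: expected Hamming weight of the output difference
sage: entropy = sum(-x*log(x, 2) - (1 - x)*log(1 - x, 2) for x in p if 0 < x < 1)  # avalanche entropy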

    +
    +

    Note

    +

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size +with input diff injected in key

    +
    +

    INPUT:

    +
      +
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • +
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • +
    +
    +

    See also

    +

    avalanche_probability_vectors() for the returning vectors.

    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    +sage: from claasp.cipher_modules.avalanche_tests import AvalancheTests
    +sage: test = AvalancheTests(speck)
    +sage: apvs = test.avalanche_probability_vectors(100)
    +sage: d = test.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2) # random
    +
    +
    +
    + +
    +
    +generate_3D_plot(number_of_samples=100, criterion='avalanche_weight_vectors')
    +

Return an object that can be plotted to visualize the results of the avalanche properties in a 3D graph.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: cipher = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    +sage: from claasp.cipher_modules.avalanche_tests import AvalancheTests
    +sage: plot = AvalancheTests(cipher).generate_3D_plot(number_of_samples=100)
    +sage: type(plot)
    +<class 'module'>
    +
    +sage: from claasp.ciphers.permutations.chacha_permutation import ChachaPermutation
    +sage: cipher = ChachaPermutation(number_of_rounds=5)
    +sage: from claasp.cipher_modules.avalanche_tests import AvalancheTests
    +sage: plot = AvalancheTests(cipher).generate_3D_plot(number_of_samples=100)
    +sage: type(plot)
    +<class 'module'>
    +
    +
    +
    -
    -
    -set_vector_weight(criterion, input_diff, input_tag, number_of_occurrence, output_tag, vector)
    -
    +
    @@ -188,13 +222,13 @@

    Navigation

    This Page

    @@ -212,7 +246,7 @@

    Quick search

    - +
    @@ -227,10 +261,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -238,7 +272,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/code_generator.html b/docs/build/html/cipher_modules/code_generator.html index a3c19a97..8ec047ec 100644 --- a/docs/build/html/cipher_modules/code_generator.html +++ b/docs/build/html/cipher_modules/code_generator.html @@ -1,23 +1,24 @@ - + - Code generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Code generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Code generator

    +

    Code generator

    build_code_for_components(cipher, cipher_code_string, i, verbosity)
    @@ -82,11 +83,6 @@

    Navigation

    constant_to_bitstring(val, output_size)
    -
    -
    -constant_to_repr(val, output_size)
    -
    -
    delete_generated_evaluate_c_shared_library(cipher)
    @@ -128,7 +124,7 @@

    Navigation

    -generate_byte_based_vectorized_python_code_string(cipher, store_intermediate_outputs=False, verbosity=False)
    +generate_byte_based_vectorized_python_code_string(cipher, store_intermediate_outputs=False, verbosity=False, integers_inputs_and_outputs=False)

Return the Python code string needed to evaluate a cipher using a byte-based vectorized implementation.

    INPUT:

    @@ -358,10 +354,10 @@

    - + diff --git a/docs/build/html/cipher_modules/component_analysis_tests.html b/docs/build/html/cipher_modules/component_analysis_tests.html index 4a27d576..be9b8715 100644 --- a/docs/build/html/cipher_modules/component_analysis_tests.html +++ b/docs/build/html/cipher_modules/component_analysis_tests.html @@ -1,23 +1,24 @@ - + - Component analysis tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Component analysis tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,80 +57,72 @@

    Navigation

    -

    Component analysis tests

    -
    -
    -AND_as_boolean_function(component, boolean_polynomial_ring)
    -

Return a list of boolean polynomials corresponding to the output bits of an AND component.

    -

    INPUT:

    -
      -
    • componentComponent object; a component from the cipher

    • -
    • boolean_polynomial_ringBoolean Polynomial Ring object; a boolean polynomial ring

    • -
    +

    Component analysis tests

    +
    +
    +class CipherComponentsAnalysis(cipher)
    +

    Bases: object

    +
    +
    +component_analysis_tests()
    +

Return a list of properties for all the operations used in a cipher.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import (AND_as_boolean_function,
    -....: generate_boolean_polynomial_ring_from_cipher)
    +sage: from claasp.cipher_modules.component_analysis_tests import CipherComponentsAnalysis
     sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: and_component = fancy.get_component_from_id('and_0_8')
    -sage: boolean_polynomial_ring = generate_boolean_polynomial_ring_from_cipher(fancy)
    -sage: boolean_polynomials = AND_as_boolean_function(and_component, boolean_polynomial_ring)
    -sage: len(boolean_polynomials)
    -12
    +sage: components_analysis = CipherComponentsAnalysis(fancy).component_analysis_tests()
    +sage: len(components_analysis)
    +9
     
    -
    -
    -MODADD_as_boolean_function(component, boolean_polynomial_ring)
    -

    Return a list of boolean polynomials corresponding to the output bits of a MODADD component.

    +
    +
    +get_all_operations()
    +

    Return a dictionary for which the keys are all the operations that are used in the cipher.

    +
    +
    The attributes are a list containing:
      +
    • a component with the operation under study;

    • +
    • number of occurrences of the operation;

    • +
    • list of ids of all the components with the same underlying operation.

    • +
    +
    +

    INPUT:

      -
    • componentComponent object; a component from the cipher

    • -
    • boolean_polynomial_ringBoolean Polynomial Ring object; a boolean polynomial ring

    • +
    • cipherCipher object; a cipher instance

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import (MODADD_as_boolean_function,
    -....: generate_boolean_polynomial_ring_from_cipher)
    +sage: from claasp.cipher_modules.component_analysis_tests import CipherComponentsAnalysis
     sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: modadd_component = fancy.get_component_from_id('modadd_1_9')
    -sage: boolean_polynomial_ring = generate_boolean_polynomial_ring_from_cipher(fancy)
    -sage: boolean_polynomials = MODADD_as_boolean_function(modadd_component, boolean_polynomial_ring)
    -sage: len(boolean_polynomials)
    -6
    +sage: cipher_operations = CipherComponentsAnalysis(fancy).get_all_operations()
    +sage: list(cipher_operations.keys())
    +['sbox', 'linear_layer', 'XOR', 'AND', 'MODADD', 'ROTATE', 'SHIFT']
     
    -
    -
    -XOR_as_boolean_function(component, boolean_polynomial_ring)
    -

    Return a list of boolean polynomials corresponding to the output bits of a XOR component.

    -

    INPUT:

    -
      -
    • componentComponent object; a component from the cipher

    • -
    • boolean_polynomial_ringBoolean Polynomial Ring object; a boolean polynomial ring

    • -
    +
    +
    +print_component_analysis_as_radar_charts(results=None)
    +

Return a graph that can be plotted to visualize the properties of all the operations of a cipher in a spider (radar) graph.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import (XOR_as_boolean_function,
    -....: generate_boolean_polynomial_ring_from_cipher)
    +sage: from claasp.cipher_modules.component_analysis_tests import CipherComponentsAnalysis
     sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: xor_component = fancy.get_component_from_id('xor_2_7')
    -sage: boolean_polynomial_ring = generate_boolean_polynomial_ring_from_cipher(fancy)
    -sage: boolean_polynomials = XOR_as_boolean_function(xor_component, boolean_polynomial_ring)
    -sage: len(boolean_polynomials)
    -12
    +sage: CipherComponentsAnalysis(fancy).print_component_analysis_as_radar_charts()
    +
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=3)
    +sage: from claasp.cipher_modules.component_analysis_tests import CipherComponentsAnalysis
    +sage: CipherComponentsAnalysis(speck).print_component_analysis_as_radar_charts()
     
    -
    -
    -add_attributes_to_operation(cipher_operations, operation, tmp_cipher_operations)
    -
    +
    @@ -176,16 +169,6 @@

    Navigation

    -
    -
    -calculate_carry_for_three_blocks(boolean_polynomial_ring, output_bit_size, variables_names)
    -
    - -
    -
    -calculate_carry_for_two_blocks(boolean_polynomial_ring, output_bit_size, variables_names)
    -
    -
    calculate_weights_for_linear_layer(component, format, type)
    @@ -196,21 +179,6 @@

    Navigation

    calculate_weights_for_mix_column(component, format, type)
    -
    -
    -collect_component_operations(component, tmp_cipher_operations)
    -
    - -
    -
    -collect_components_with_the_same_operation(operation, tmp_cipher_operations)
    -
    - -
    -
    -component_analysis_tests(cipher)
    -
    -
    field_element_matrix_to_integer_matrix(matrix)
    @@ -241,55 +209,6 @@

    Navigation

    -
    -
    -fill_area(ax, categories, plot_number, positions, results)
    -
    - -
    -
    -generate_boolean_polynomial_ring_from_cipher(cipher)
    -

    Return the boolean polynomial ring for which the variables correspond to all input bits of each cipher component.

    -

    INPUT:

    -
      -
    • cipherCipher object; a cipher instance

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import generate_boolean_polynomial_ring_from_cipher
    -sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: boolean_polynomial_ring = generate_boolean_polynomial_ring_from_cipher(fancy)
    -
    -
    -
    - -
    -
    -get_all_operations(cipher)
    -

    Return a dictionary for which the keys are all the operations that are used in the cipher.

    -
    -
    The attributes are a list containing:
      -
    • a component with the operation under study;

    • -
    • number of occurrences of the operation;

    • -
    • list of ids of all the components with the same underlying operation.

    • -
    -
    -
    -

    INPUT:

    -
      -
    • cipherCipher object; a cipher instance

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import get_all_operations
    -sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: cipher_operations = get_all_operations(fancy)
    -sage: list(cipher_operations.keys())
    -['sbox', 'linear_layer', 'XOR', 'AND', 'MODADD', 'ROTATE', 'SHIFT']
    -
    -
    -
    -
    get_inverse_matrix_in_integer_representation(component)
    @@ -313,7 +232,9 @@

    Navigation

sage: from claasp.cipher_modules.component_analysis_tests import get_inverse_matrix_in_integer_representation
sage: midori = MidoriBlockCipher(number_of_rounds=3)
sage: mix_column_component = midori.get_component_from_id('mix_column_0_20')
-sage: get_inverse_matrix_in_integer_representation(mix_column_component)
+sage: m = get_inverse_matrix_in_integer_representation(mix_column_component)
+sage: m.dimensions()
+(16,16)
    @@ -357,204 +278,35 @@

    Navigation

    -
    -
    -initialise_spider_plot(plot_number, results)
    -
    -
    instantiate_matrix_over_correct_field(matrix, polynomial_as_int, word_size, input_bit_size, output_bit_size)
    -

-sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
-sage: from claasp.cipher_modules.component_analysis_tests import instantiate_matrix_over_correct_field, field_element_matrix_to_integer_matrix
-sage: midori = MidoriBlockCipher(number_of_rounds=2)
-sage: mix_column_component = midori.get_component_from_id('mix_column_0_20')
-sage: description = mix_column_component.description
-sage: mc_matrix, _ = instantiate_matrix_over_correct_field(description[0], int(description[1]), int(description[2]),
-                                                 mix_column_component.input_bit_size, mix_column_component.output_bit_size)

-sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
-sage: from claasp.cipher_modules.component_analysis_tests import instantiate_matrix_over_correct_field, field_element_matrix_to_integer_matrix
-sage: midori = MidoriBlockCipher(number_of_rounds=2)
-sage: mix_column_component = midori.get_component_from_id('mix_column_0_21')
-sage: description = mix_column_component.description
-sage: mc_matrix, _ = instantiate_matrix_over_correct_field(description[0], int(description[1]), int(description[2]),
-                                                 mix_column_component.input_bit_size, mix_column_component.output_bit_size)

    -
    -
    - -
    -
    -int_to_poly(integer_value, word_size, variable)
    -
    - -
    -
    -is_mds(component)
    -

    A matrix is MDS if and only if all the minors (determinants of square submatrices) are non-zero

    -

    INPUT:

    -
      -
    • componentComponent object; a component from the cipher

    • -
    +

    Return a binary matrix based on the description of a component.

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.twofish_block_cipher import TwofishBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import is_mds
    -sage: twofish = TwofishBlockCipher(number_of_rounds=2)
    -sage: mix_column_component = twofish.get_component_from_id('mix_column_0_19')
    -sage: is_mds(mix_column_component)
    -True
    -
    -sage: from claasp.ciphers.block_ciphers.skinny_block_cipher import SkinnyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import is_mds
    -sage: skinny = SkinnyBlockCipher(block_bit_size=128, key_bit_size=384, number_of_rounds=40)
    -sage: mix_column_component = skinny.get_component_from_id('mix_column_0_31')
    -sage: is_mds(mix_column_component)
    -False
    -
    -sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import is_mds
    -sage: aes = AESBlockCipher(number_of_rounds=3)
    -sage: mix_column_component = aes.get_component_from_id('mix_column_1_20')
    -sage: is_mds(mix_column_component)
    -True
    -
    -
    -
    - -
    -
    -linear_layer_properties(operation)
    -

    Return a dictionary containing some properties of the linear layer operation under study.

    -

    INPUT:

    -
      -
    • operationlist; a list containing:

      -
        -
      • a component with the operation under study

      • -
      • number of occurrences of the operation

      • -
      • list of ids of all the components with the same underlying operation

      • -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.component_analysis_tests import linear_layer_properties
    -sage: from claasp.components.rotate_component import Rotate
    -sage: rot_component = Rotate(1, 11, ['sbox_1_1', 'sbox_1_2'], [[2, 3], [0, 1, 2, 3]], 6, -3)
    -sage: operation = [rot_component, 1, ['rot_1_11']]
    -sage: d = linear_layer_properties(operation)
    -sage: d["properties"]["differential_branch_number"]["value"]
    -2
    -
    -
    -
    - -
    -
    -order_of_linear_component(component)
    -

    Return the multiplicative order of a linear component

    -

    INPUT:

    -
      -
    • componentComponent object; a component from the cipher

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import order_of_linear_component
    -sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: rot_component = fancy.get_component_from_id('rot_1_11')
    -sage: order_of_linear_component(rot_component)
    -2
    -
    -
    -
    - -
    -
    -plot_first_line_of_data_frame(categories, plot_number, results)
    -
    - -
    -
    -print_component_analysis_as_radar_charts(results)
    -
    - -
    -
    -remove_components_with_strings_as_values(results_without_xor)
    -
    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: from claasp.cipher_modules.component_analysis_tests import instantiate_matrix_over_correct_field
    +sage: midori = MidoriBlockCipher(number_of_rounds=2)
    +sage: mix_column_component = midori.get_component_from_id('mix_column_0_20')
    +sage: description = mix_column_component.description
    +sage: mc_matrix, _ = instantiate_matrix_over_correct_field(description[0], int(description[1]), int(description[2]),
    +                                                 mix_column_component.input_bit_size, mix_column_component.output_bit_size)
     
    -
    -
    -sbox_properties(operation)
    -

Return a dictionary containing some properties of an Sbox component.

    -

    INPUT:

    -
      -
    • operationlist; a list containing:

      -
        -
      • a component with the operation under study

      • -
      • number of occurrences of the operation

      • -
      • list of ids of all the components with the same underlying operation

      • -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.component_analysis_tests import sbox_properties
    -sage: from claasp.components.sbox_component import SBOX
    -sage: sbox_component = SBOX(0, 0, ['plaintext'], [[0, 1, 2, 3]], 4, [0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15])
    -sage: operation = [sbox_component, 12, ['sbox_0_0', 'sbox_0_1', 'sbox_0_2', 'sbox_0_3', 'sbox_0_4', 'sbox_0_5',
    -....: 'sbox_1_0', 'sbox_1_1', 'sbox_1_2', 'sbox_1_3', 'sbox_1_4', 'sbox_1_5']]
    -sage: d = sbox_properties(operation)
    -sage: d["properties"]["boomerang_uniformity"]["value"]
    -16
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: from claasp.cipher_modules.component_analysis_tests import instantiate_matrix_over_correct_field
    +sage: midori = MidoriBlockCipher(number_of_rounds=2)
    +sage: mix_column_component = midori.get_component_from_id('mix_column_0_21')
    +sage: description = mix_column_component.description
    +sage: mc_matrix, _ = instantiate_matrix_over_correct_field(description[0], int(description[1]), int(description[2]),
    +                                                 mix_column_component.input_bit_size, mix_column_component.output_bit_size)
     
    -
    -select_boolean_function(component, boolean_polynomial_ring)
    -
    - -
    -
    -select_properties_function(boolean_polynomial_ring, operation)
    -
    - -
    -
    -set_variables_names(component, number_of_inputs)
    +
    +int_to_poly(integer_value, word_size, variable)
    -
    -
    -word_operation_properties(operation, boolean_polynomial_ring)
    -

Return a dictionary containing some properties of a word operation component.

    -

    INPUT:

    -
      -
    • operationlist; a list containing:

      -
        -
      • a component with the operation under study

      • -
      • number of occurrences of the operation

      • -
      • list of ids of all the components with the same underlying operation

      • -
      -
    • -
    • boolean_polynomial_ringBoolean Polynomial Ring object; a boolean polynomial ring

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    -sage: from claasp.cipher_modules.component_analysis_tests import (word_operation_properties,
    -....: generate_boolean_polynomial_ring_from_cipher)
    -sage: fancy = FancyBlockCipher(number_of_rounds=3)
    -sage: modadd_component = fancy.component_from(1, 9)
    -sage: operation = [modadd_component, 2, ['modadd_1_9', 'modadd_1_10']]
    -sage: boolean_polynomial_ring = generate_boolean_polynomial_ring_from_cipher(fancy)
    -sage: d = word_operation_properties(operation, boolean_polynomial_ring)
    -sage: d["properties"]["degree"]["value"]
    -4.5
    -
    -
    -
    - @@ -566,13 +318,13 @@

    - + diff --git a/docs/build/html/cipher_modules/continuous_diffusion_analysis.html b/docs/build/html/cipher_modules/continuous_diffusion_analysis.html new file mode 100644 index 00000000..5b44f67c --- /dev/null +++ b/docs/build/html/cipher_modules/continuous_diffusion_analysis.html @@ -0,0 +1,336 @@ + + + + + + + + + Continuous diffusion analysis — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    + +
    +

    Continuous diffusion analysis

    +
    +
    +class ContinuousDiffusionAnalysis(cipher)
    +

    Bases: object

    +
    +
    +continuous_avalanche_factor(lambda_value, number_of_samples)
    +

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    +

    INPUT:

    +
      +
    • lambda_valuefloat; threshold value used to express the input difference

    • +
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from claasp.cipher_modules.continuous_diffusion_analysis import ContinuousDiffusionAnalysis
    +sage: speck_cipher = speck(number_of_rounds=2)
    +sage: cda = ContinuousDiffusionAnalysis(speck_cipher)
    +sage: result = cda.continuous_avalanche_factor(0.001, 10)
    +sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]
    +0.0
    +
    +
    +
    + +
    +
    +continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    +

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    +

    INPUT:

    +
      +
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • +
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from claasp.cipher_modules.continuous_diffusion_analysis import ContinuousDiffusionAnalysis
    +sage: speck_cipher = speck(number_of_rounds=2) # long time
    +sage: cda = ContinuousDiffusionAnalysis(speck_cipher)
    +sage: output = cda.continuous_diffusion_factor(5, 20) # long time
    +sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0] > 0 # long time
    +True
    +
    +
    +
    + +
    +
    +continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    +

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    +

    INPUT:

    +
      +
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples +used to obtain the metric continuous_avalanche_factor

    • +
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the +input difference for the metric continuous_avalanche_factor

    • +
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples +used to compute the continuous measure metric

    • +
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used +to approximate gf_2

    • +
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples +used to compute the continuous measure metric

    • +
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors +used to approximate gf_2

    • +
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the +continuous_avalanche_factor or not

    • +
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the +continuous_neutrality_measure or not

    • +
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • +
    +

    OUTPUT:

    +
    +
      +
• A Python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • +
    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from claasp.cipher_modules.continuous_diffusion_analysis import ContinuousDiffusionAnalysis
    +sage: speck_cipher = speck(number_of_rounds=1) # long time
    +sage: cda = ContinuousDiffusionAnalysis(speck_cipher)
    +sage: output = cda.continuous_diffusion_tests() # long time
    +sage: output["test_results"]['plaintext']['round_key_output']['continuous_neutrality_measure'][0]['values'][0] == 0.0  # long time
    +True
    +
    +
    +
    + +
    +
    +continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    +

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    +

    INPUT:

    +
      +
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • +
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • +
    • input_bitinteger (default: None); input bit position to be analyzed

    • +
    • output_bitslist (default: None); output bit positions to be analyzed

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from claasp.cipher_modules.continuous_diffusion_analysis import ContinuousDiffusionAnalysis
    +sage: speck_cipher = speck(number_of_rounds=2)
    +sage: cda = ContinuousDiffusionAnalysis(speck_cipher)
    +sage: output = cda.continuous_neutrality_measure_for_bit_j(50, 200) # long time
    +sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    +True
    +
    +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/cipher_modules/evaluator.html b/docs/build/html/cipher_modules/evaluator.html index 7cda1e24..8da7c417 100644 --- a/docs/build/html/cipher_modules/evaluator.html +++ b/docs/build/html/cipher_modules/evaluator.html @@ -1,23 +1,24 @@ - + - Evaluator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Evaluator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Evaluator

    +

    Evaluator

    evaluate(cipher, cipher_input, intermediate_output=False, verbosity=False)
    @@ -69,7 +70,7 @@

    Navigation

    -evaluate_vectorized(cipher, cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher, cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    @@ -88,13 +89,13 @@

    - + diff --git a/docs/build/html/cipher_modules/generic_bit_based_c_functions.html b/docs/build/html/cipher_modules/generic_bit_based_c_functions.html index 7dec7a35..1c956a80 100644 --- a/docs/build/html/cipher_modules/generic_bit_based_c_functions.html +++ b/docs/build/html/cipher_modules/generic_bit_based_c_functions.html @@ -1,23 +1,24 @@ - + - Generic bit based c functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic bit based c functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Generic bit based c functions

    +

    Generic bit based c functions

    @@ -68,13 +69,13 @@

    Generic bit based c functions

    - + diff --git a/docs/build/html/cipher_modules/generic_functions.html b/docs/build/html/cipher_modules/generic_functions.html index 06c0c3c9..20a7f94e 100644 --- a/docs/build/html/cipher_modules/generic_functions.html +++ b/docs/build/html/cipher_modules/generic_functions.html @@ -1,23 +1,24 @@ - + - Generic functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Generic functions

    +

    Generic functions

    AND(input, number_of_inputs, verbosity=False)
    @@ -506,13 +507,13 @@

    Navigation

    Previous topic

    -

    Inverse cipher

    +

    Avalanche tests

    This Page

    @@ -530,7 +531,7 @@

    Quick search

    - +
    @@ -545,10 +546,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -556,7 +557,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/generic_functions_continuous_diffusion_analysis.html b/docs/build/html/cipher_modules/generic_functions_continuous_diffusion_analysis.html index 331f2224..8e821ebd 100644 --- a/docs/build/html/cipher_modules/generic_functions_continuous_diffusion_analysis.html +++ b/docs/build/html/cipher_modules/generic_functions_continuous_diffusion_analysis.html @@ -1,23 +1,24 @@ - + - Generic functions continuous diffusion analysis — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic functions continuous diffusion analysis — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Generic functions continuous diffusion analysis

    +

    Generic functions continuous diffusion analysis

    AND_continuous_diffusion_analysis(input_lst, number_of_inputs)
    @@ -447,13 +448,13 @@

    Navigation

    Previous topic

    -

    Code generator

    +

    Modular component

    This Page

    @@ -471,7 +472,7 @@

    Quick search

    - +
    @@ -486,10 +487,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -497,7 +498,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/generic_functions_vectorized_bit.html b/docs/build/html/cipher_modules/generic_functions_vectorized_bit.html index 7c3643e9..db833799 100644 --- a/docs/build/html/cipher_modules/generic_functions_vectorized_bit.html +++ b/docs/build/html/cipher_modules/generic_functions_vectorized_bit.html @@ -1,23 +1,24 @@ - + - Generic functions vectorized bit — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic functions vectorized bit — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Generic functions vectorized bit

    +

    Generic functions vectorized bit

    bit_vector_AND(input, number_of_inputs, output_bit_size, verbosity=False)
    @@ -150,7 +151,7 @@

    Navigation

    -bit_vector_SBOX(input, sbox, verbosity=False)
    +bit_vector_SBOX(input, sbox, verbosity=False, output_bit_size=None)

    Computes the SBox operation on binary values.

    INPUT:

    @@ -295,13 +296,13 @@

    - + diff --git a/docs/build/html/cipher_modules/generic_functions_vectorized_byte.html b/docs/build/html/cipher_modules/generic_functions_vectorized_byte.html index 780c5d3d..6599de35 100644 --- a/docs/build/html/cipher_modules/generic_functions_vectorized_byte.html +++ b/docs/build/html/cipher_modules/generic_functions_vectorized_byte.html @@ -1,23 +1,24 @@ - + - Generic functions vectorized byte — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic functions vectorized byte — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,10 +57,10 @@

    Navigation

    -

    Generic functions vectorized byte

    +

    Generic functions vectorized byte

    -byte_vector_AND(input, verbosity=False)
    +byte_vector_AND(input)

    Computes the result of the AND operation

    INPUT:

    INPUT: @@ -67,36 +68,31 @@

    Navigation

    sample.

    -
      -
    • verbosityboolean; (default: False); set this flag to True to print the input/output.

    • -
    -byte_vector_MODADD(input, verbosity=False)
    +byte_vector_MODADD(input)

    Computes the result of the MODADD operation.

    INPUT:

    • inputlist; A list of numpy byte matrices to be added, each with one row per byte, and one column per sample.

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    -byte_vector_MODSUB(input, verbosity=False)
    +byte_vector_MODSUB(input)

    Computes the result of the MODSUB operation.

    INPUT:

    • inputlist; A list of 2 numpy byte matrices to be subtracted, each with one row per byte, and one column per sample.

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    -byte_vector_NOT(input, verbosity=False)
    +byte_vector_NOT(input)

    Computes the result of the NOT operation.

    INPUT:

      @@ -105,13 +101,12 @@

      Navigation

    -
  • verbosityboolean; (default: False); set this flag to True to print the input/output

  • -byte_vector_OR(input, verbosity=False)
    +byte_vector_OR(input)

    Computes the result of the OR operation.

    INPUT:

    INPUT: @@ -119,14 +114,11 @@

    Navigation

    sample.

    -
      -
    • verbosityboolean; (default: False); set this flag to True to print the input/output.

    • -
    -byte_vector_ROTATE(input, rotation_amount, verbosity=False)
    +byte_vector_ROTATE(input, rotation_amount, input_bit_size)

    Computes the result of the bitwise ROTATE operation.

    INPUT:

      @@ -137,25 +129,23 @@

      Navigation

    -
  • verbosityboolean; (default: False); set this flag to True to print the input/output

  • -byte_vector_SBOX(val, sbox, verbosity=False)
    +byte_vector_SBOX(val, sbox, input_bit_size)

    Computes the result of the SBox operation.

    INPUT:

    • valnp.array(dtype = np.uint8) A numpy matrix with one row per byte and one column per sample.

    • sboxnp.array(dtype = np.uint8) An integer numpy array representing the SBox.

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    -byte_vector_SHIFT(input, shift_amount, verbosity=False)
    +byte_vector_SHIFT(input, shift_amount)

    Computes the result of the bitwise SHIFT operation.

    INPUT:

      @@ -166,13 +156,12 @@

      Navigation

    -
  • verbosityboolean; (default: False); set this flag to True to print the input/output

  • -byte_vector_SHIFT_BY_VARIABLE_AMOUNT(input, input_size, shift_direction, verbosity=False)
    +byte_vector_SHIFT_BY_VARIABLE_AMOUNT(input, input_size, shift_direction)

    Computes the bitwise shift by variable amount operation.

    INPUT:

      @@ -183,22 +172,18 @@

      Navigation

    -
  • verbosityboolean; (default: False); set this flag to True to print the input/output

  • -byte_vector_XOR(input, verbosity=False)
    +byte_vector_XOR(input)

    Computes the result of the XOR operation.

INPUT:

• input – list; A list of numpy byte matrices to be XORed, each with one row per byte, and one column per sample.

    -
      -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    • -
    @@ -231,26 +216,24 @@

    Navigation

    -byte_vector_mix_column(input, matrix, mul_table, verbosity=False)
    +byte_vector_mix_column(input, matrix, mul_table, word_size)

    Computes the mix_column operation.

    INPUT:

    • inputnp.array(dtype = np.uint8) A numpy matrix with one row per byte, and one column per sample.

    • matrixlist; a list of lists of integers

    • mul_tablesdictionary; a dictionary giving the multiplication table by x at key x

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    -byte_vector_mix_column_poly0(input, matrix, verbosity=False)
    +byte_vector_mix_column_poly0(input, matrix, word_size)

    Computes the mix_column operation, special case where poly=0.

    INPUT:

    • inputnp.array(dtype = np.uint8) A numpy matrix with one row per byte, and one column per byte.

    • matrixlist; a list of lists of integers

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    @@ -267,7 +250,7 @@

    Navigation

    -byte_vector_select_all_words(unformatted_inputs, real_bits, real_inputs, number_of_inputs, words_per_input, actual_inputs_bits, verbosity=False)
    +byte_vector_select_all_words(unformated_inputs, real_bits, real_inputs, number_of_inputs, words_per_input, actual_inputs_bits)

    Parses the inputs from the cipher into a list of numpy byte arrays, each corresponding to one input to the function.

    INPUT:

      @@ -285,20 +268,72 @@

      Navigation

    • number_of_inputsinteger; an integer representing the number of inputs expected by the operation

    • words_per_inputinteger; the number of 8-bit words to be reserved for each of the inputs

    • actual_inputs_bitsinteger; the bit size of the variables in unformatted_inputs

    • -
    • verbosityboolean; (default: False); set this flag to True to print the input/output

    +
    +
    +cipher_inputs_to_evaluate_vectorized_inputs(cipher_inputs, cipher_inputs_bit_size)
    +

Converts cipher_inputs from integers to the format expected by evaluate_vectorized. If cipher_inputs is a list of integers (one per input position), then the function returns a list of numpy matrices that can be used to evaluate a single set of inputs to the cipher (with an API similar to cipher.evaluate). If cipher_inputs is a list of lists of integers (one per input position), then the function returns a list of numpy matrices that can be used to evaluate multiple sets of inputs to the cipher. The produced matrices contain one row per byte, and one column per value. If needed, the values are padded with zeroes on the left.

    +

INPUT:

• cipher_inputs – list; A list of lists of integers (one per cipher input position)
• cipher_inputs_bit_size – list; The input bit sizes of the cipher

    +
    + +
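A hedged sketch of the conversion described above, using the small Speck instance from the other examples; the inputs_bit_size attribute is assumed to list the cipher's input sizes as elsewhere in this documentation, and the resulting matrices are not printed.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.generic_functions_vectorized_byte import cipher_inputs_to_evaluate_vectorized_inputs
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: vec_inputs = cipher_inputs_to_evaluate_vectorized_inputs([0x1234, 0xABCD1234], speck.inputs_bit_size)  # plaintext, key
sage: [m.shape for m in vec_inputs]  # expected: one matrix per cipher input, one row per byte, one column per value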
    +
    +evaluate_vectorized_outputs_to_integers(evaluate_vectorized_outputs, cipher_output_bit_size)
    +

Converts the outputs of evaluate_vectorized (a list containing a single numpy matrix) to a list of integers (one per output/row of the matrix).

    +

INPUT:

• evaluate_vectorized_outputs – list; A list containing one numpy array returned by evaluate_vectorized
• cipher_output_bit_size – integer; The output bit size of the cipher

    +
    +
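A hedged end-to-end sketch combining the two helpers documented here with the cipher's vectorized evaluator; evaluate_vectorized and output_bit_size are assumed to behave as described elsewhere in this documentation, and the output list is omitted.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.generic_functions_vectorized_byte import (
....:     cipher_inputs_to_evaluate_vectorized_inputs, evaluate_vectorized_outputs_to_integers)
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: vec_inputs = cipher_inputs_to_evaluate_vectorized_inputs([[0x1234, 0x5678], [0x11223344, 0x55667788]], speck.inputs_bit_size)
sage: outputs = speck.evaluate_vectorized(vec_inputs)
sage: evaluate_vectorized_outputs_to_integers(outputs, speck.output_bit_size)  # random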
    generate_formatted_inputs(actual_inputs_bits, i, output, pos, real_bits, real_inputs, unformatted_inputs, words_per_input)
    -
    -print_component_info(input, output, component_type)
    +
    +get_number_of_bytes_needed_for_bit_size(bit_size)
    +
    +
    +get_number_of_consecutive_bits(l)
    +

    Return the number of consecutive numbers from the start of list l, in decreasing order.

    +

    INPUT:

    +
      +
    • llist; a list of bit positions, in reverse order

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.generic_functions_vectorized_byte import get_number_of_consecutive_bits
    +sage: L=[4, 3, 5, 7, 2]
    +sage: get_number_of_consecutive_bits(L) == 2
    +True
    +
    +
    +
    + +
    +
    +integer_array_to_evaluate_vectorized_input(values, bit_size)
    +

Converts the bit_size integers from the values array to the representation accepted by evaluate_vectorized: a numpy matrix of unsigned 8-bit integers (one row per byte, one column per value). If needed, the values are padded with zeroes on the left. If the cipher takes multiple inputs, this function needs to be called once for each input.

    +

INPUT:

• values – list; A list of integers
• bit_size – integer; The bit size of the elements of values

    +
    +
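A small hedged illustration of the layout described above; the two values are arbitrary and, following the description, a 16-bit size should give one row per byte (two rows) and one column per value (two columns).

sage: from claasp.cipher_modules.generic_functions_vectorized_byte import integer_array_to_evaluate_vectorized_input
sage: m = integer_array_to_evaluate_vectorized_input([0x1234, 0xABCD], 16)
sage: m.shape  # expected (2, 2): one row per byte, one column per value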
    @@ -310,13 +345,13 @@

    - + diff --git a/docs/build/html/cipher_modules/generic_word_based_c_functions.html b/docs/build/html/cipher_modules/generic_word_based_c_functions.html index 8cb99830..83af84ff 100644 --- a/docs/build/html/cipher_modules/generic_word_based_c_functions.html +++ b/docs/build/html/cipher_modules/generic_word_based_c_functions.html @@ -1,23 +1,24 @@ - + - Generic word based c functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Generic word based c functions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Generic word based c functions

    +

    Generic word based c functions

    @@ -68,13 +69,13 @@

    Generic word based c functions

    - + diff --git a/docs/build/html/cipher_modules/graph_generator.html b/docs/build/html/cipher_modules/graph_generator.html index 100ad2a0..89fa2131 100644 --- a/docs/build/html/cipher_modules/graph_generator.html +++ b/docs/build/html/cipher_modules/graph_generator.html @@ -1,23 +1,24 @@ - + - Graph generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Graph generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Graph generator

    +

    Graph generator

    create_networkx_graph_from_input_ids(cipher)
    @@ -91,13 +92,13 @@

    Navigation

    Next topic

    -

    Inverse cipher

    +

    Tester

    This Page

    @@ -115,7 +116,7 @@

    Quick search

    - +
    @@ -130,10 +131,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -141,7 +142,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/inverse_cipher.html b/docs/build/html/cipher_modules/inverse_cipher.html index 1c0cc831..f7a26dce 100644 --- a/docs/build/html/cipher_modules/inverse_cipher.html +++ b/docs/build/html/cipher_modules/inverse_cipher.html @@ -1,23 +1,24 @@ - + - Inverse cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Inverse cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Inverse cipher

    +

    Inverse cipher

    add_bit_to_bit_list(bit, bit_list)
    @@ -220,7 +221,7 @@

    Navigation

    -get_relative_position(target_link, target_bit_positions, descendant)
    +get_relative_position(target_link, target_bit_positions, intermediate_output)
    @@ -318,13 +319,13 @@

    Navigation

    This Page

    @@ -342,7 +343,7 @@

    Quick search

    - +
    @@ -357,10 +358,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -368,7 +369,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/algebraic/algebraic_model.html b/docs/build/html/cipher_modules/models/algebraic/algebraic_model.html index c634a439..9fc14b6f 100644 --- a/docs/build/html/cipher_modules/models/algebraic/algebraic_model.html +++ b/docs/build/html/cipher_modules/models/algebraic/algebraic_model.html @@ -1,23 +1,24 @@ - + - Algebraic model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Algebraic model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Algebraic model

    +

    Algebraic model

    class AlgebraicModel(cipher)
    @@ -117,14 +118,14 @@

    Navigation

    Return True if the cipher is resistant against algebraic attack.

    INPUT:

      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • +
    • timeoutinteger; the timeout for the Groebner basis computation in seconds

    EXAMPLES:

    sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: algebraic = AlgebraicModel(identity)
    -sage: algebraic.is_algebraically_secure(120)
    +sage: from claasp.ciphers.toys.toyspn1 import ToySPN1
    +sage: toyspn = ToySPN1()
    +sage: algebraic = AlgebraicModel(toyspn)
    +sage: algebraic.is_algebraically_secure(30)
     False
     
    @@ -157,18 +158,48 @@

    Navigation

  • None

  • EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +
    sage: from claasp.ciphers.toys.toyspn1 import ToySPN1
    +sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: toyspn = ToySPN1()
    +sage: AlgebraicModel(toyspn).polynomial_system()
    +Polynomial Sequence with 74 Polynomials in 42 Variables
    +
    +sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
     sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
     sage: fancy = FancyBlockCipher(number_of_rounds=1)
    -sage: AlgebraicModel(fancy).polynomial_system()  # long time
    -Polynomial Sequence with 468 Polynomials in 384 Variables
    +sage: AlgebraicModel(fancy).polynomial_system()
    +Polynomial Sequence with 228 Polynomials in 144 Variables
    +
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=2)
    +sage: AlgebraicModel(speck).polynomial_system()
    +Polynomial Sequence with 192 Polynomials in 256 Variables
    +
    +sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: aes = AESBlockCipher(word_size=4, state_size=2, number_of_rounds=1)
    +sage: AlgebraicModel(aes).polynomial_system()
    +Polynomial Sequence with 174 Polynomials in 104 Variables
    +
    +sage: from claasp.ciphers.block_ciphers.tea_block_cipher import TeaBlockCipher
    +sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: tea = TeaBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=1)
    +sage: AlgebraicModel(tea).polynomial_system()
    +Polynomial Sequence with 288 Polynomials in 384 Variables
    +
    +sage: from claasp.ciphers.permutations.gift_permutation import GiftPermutation
    +sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: gift = GiftPermutation(number_of_rounds=1)
    +sage: AlgebraicModel(gift).polynomial_system()
    +Polynomial Sequence with 448 Polynomials in 640 Variables
     
    -polynomial_system_at_round(r)
    +polynomial_system_at_round(r, method_call_flag=False)

    Return a polynomial system at round r.

    INPUT:

      @@ -178,8 +209,8 @@

      Navigation

      sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
       sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
       sage: fancy = FancyBlockCipher(number_of_rounds=1)
      -sage: AlgebraicModel(fancy).polynomial_system_at_round(0) # long time
      -Polynomial Sequence with 252 Polynomials in 288 Variables
      +sage: AlgebraicModel(fancy).polynomial_system_at_round(0)
      +Polynomial Sequence with 228 Polynomials in 144 Variables
       
    @@ -239,13 +270,13 @@

    Navigation

    Previous topic

    -

    Constraints

    +

    Utils

    This Page

    @@ -263,7 +294,7 @@

    Quick search

    - +
    @@ -278,10 +309,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -289,7 +320,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/algebraic/boolean_polynomial_ring.html b/docs/build/html/cipher_modules/models/algebraic/boolean_polynomial_ring.html index 0f4a2754..666ca6e4 100644 --- a/docs/build/html/cipher_modules/models/algebraic/boolean_polynomial_ring.html +++ b/docs/build/html/cipher_modules/models/algebraic/boolean_polynomial_ring.html @@ -1,23 +1,24 @@ - + - Boolean polynomial ring — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Boolean polynomial ring — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Boolean polynomial ring

    +

    Boolean polynomial ring

    is_boolean_polynomial_ring(R)
    @@ -89,13 +90,13 @@

    Navigation

    Previous topic

    -

    Algebraic model

    +

    Constraints

    This Page

    @@ -113,7 +114,7 @@

    Quick search

    - +
    @@ -128,10 +129,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -139,7 +140,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/algebraic/constraints.html b/docs/build/html/cipher_modules/models/algebraic/constraints.html index a35b008d..7e501304 100644 --- a/docs/build/html/cipher_modules/models/algebraic/constraints.html +++ b/docs/build/html/cipher_modules/models/algebraic/constraints.html @@ -1,23 +1,24 @@ - + - Constraints — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Constraints — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Constraints

    +

    Constraints

    equality_polynomials(x, y)
    @@ -241,13 +242,13 @@

    Navigation

    This Page

    @@ -265,7 +266,7 @@

    Quick search

    - +
    @@ -280,10 +281,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -291,7 +292,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/cp/cp_model.html b/docs/build/html/cipher_modules/models/cp/cp_model.html index e5b39b28..56bfda8e 100644 --- a/docs/build/html/cipher_modules/models/cp/cp_model.html +++ b/docs/build/html/cipher_modules/models/cp/cp_model.html @@ -1,23 +1,24 @@ - + - Cp model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Cp model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Cp model

    +

    Cp model

    class CpModel(cipher)
    @@ -181,7 +182,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -232,7 +233,7 @@

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -255,6 +256,8 @@

      Navigation

    • 'COIN-BC'

    +
  • num_of_processorsinteger; the number of processors to be used

  • +
  • timelimitinteger; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -275,6 +278,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    weight_constraints(weight)
    @@ -307,13 +315,13 @@

    Navigation

    Next topic

    -

    Usefulfunctions

    +

    Solvers

    This Page

    @@ -331,7 +339,7 @@

    Quick search

    - +
    @@ -346,10 +354,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -357,7 +365,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_cipher_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_cipher_model.html index 062fdbe2..bba27cab 100644 --- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_cipher_model.html +++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_cipher_model.html @@ -1,23 +1,24 @@ - + - Cp cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Cp cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp cipher model

    +

    Cp cipher model

    class CpCipherModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_model.CpModel

    +

    Bases: CpModel

    add_solution_to_components_values(component_id, component_solution, components_values, j, output_to_parse, solution_number, string)
    @@ -73,7 +74,7 @@

    Navigation

    -build_cipher_model(fixed_variables=[])
    +build_cipher_model(fixed_variables=[], second=False)

    Build the cipher model.

    INPUT:

      @@ -147,6 +148,11 @@

      Navigation

      property cipher_id
    +
    +
    +evaluate_model(fixed_values=[], solver_name='Chuffed')
    +
    +
    final_constraints()
    @@ -231,7 +237,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -303,7 +309,7 @@

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -326,6 +332,8 @@

      Navigation

    • 'COIN-BC'

    +
  • num_of_processorsinteger; the number of processors to be used

  • +
  • timelimitinteger; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -346,6 +354,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    weight_constraints(weight)
    @@ -378,13 +391,13 @@

    Navigation

    - +
    @@ -417,10 +430,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -428,7 +441,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model.html index a2a966b3..c9dda520 100644 --- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_deterministic_truncated_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Cp deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Cp deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp deterministic truncated xor differential model

    +

    Cp deterministic truncated xor differential model

    class CpDeterministicTruncatedXorDifferentialModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_model.CpModel

    +

    Bases: CpModel

    add_solution_to_components_values(component_id, component_solution, components_values, j, output_to_parse, solution_number, string)
    @@ -68,7 +69,7 @@

    Navigation

    -add_solutions_from_components_values(components_values, memory, model_type, solutions, solve_time, solver_name, solver_output, total_weight)
    +add_solutions_from_components_values(components_values, memory, model_type, solutions, solve_time, solver_name, solver_output)
    @@ -95,7 +96,7 @@

    Navigation

    -build_inverse_deterministic_truncated_xor_differential_trail_model(number_of_rounds, fixed_variables=[])
    +build_inverse_deterministic_truncated_xor_differential_trail_model(number_of_rounds=None, fixed_variables=[])

    Build CP model for search of deterministic truncated XOR differential trails for the inverted cipher.

    INPUT:

      @@ -181,7 +182,7 @@

      Navigation

      -final_impossible_constraints(number_of_rounds)
      +final_impossible_constraints(number_of_rounds=None)

      Return a CP constraints list for the cipher outputs and solving indications for single or second step model.

      INPUT:

        @@ -200,7 +201,7 @@

        Navigation

        -find_all_deterministic_truncated_xor_differential_trail(number_of_rounds, fixed_values=[], solver_name=None)
        +find_all_deterministic_truncated_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name='Chuffed')

        Return the solution representing a differential trail with any weight.

        INPUT:

          @@ -246,7 +247,7 @@

          Navigation

          -find_one_deterministic_truncated_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name=None)
          +find_one_deterministic_truncated_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name='Chuffed')

          Return the solution representing a differential trail with any weight.

          INPUT:

            @@ -363,7 +364,7 @@

            Navigation

            -get_command_for_solver_process(input_file_path, model_type, solver_name)
            +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
            @@ -383,7 +384,7 @@

            Navigation

            -input_deterministic_truncated_xor_differential_constraints(number_of_rounds, inverse=False)
            +input_deterministic_truncated_xor_differential_constraints()

            Return a list of CP constraints for the inputs of the cipher for the first step model.

            INPUT:

              @@ -501,7 +502,7 @@

              Navigation

              -parse_solver_information(output_to_parse, truncated)
              +parse_solver_information(output_to_parse)
              @@ -511,7 +512,7 @@

              Navigation

              -solve(model_type, solver_name=None)
              +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

              Return the solution of the model.

              INPUT:

                @@ -534,6 +535,8 @@

                Navigation

              • 'COIN-BC'

              +
            • num_of_processorsinteger; the number of processors to be used

            • +
            • timelimitinteger; time limit to output a result

            EXAMPLES:

            sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
            @@ -554,6 +557,11 @@ 

            Navigation

            +
            +
            +solver_names(verbose=False)
            +
            +
            weight_constraints(weight)
            @@ -586,13 +594,13 @@

            Navigation

            - +
    @@ -625,10 +633,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -636,7 +644,7 @@

    Navigation

- +
diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.html
new file mode 100644
index 00000000..b505e9c2
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_impossible_xor_differential_model.html
@@ -0,0 +1,883 @@
+Cp impossible xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Cp impossible xor differential model

    +
    +
    +class CpImpossibleXorDifferentialModel(cipher)
    +

    Bases: CpDeterministicTruncatedXorDifferentialModel

    +
    +
    +add_solution_to_components_values(component_id, component_solution, components_values, j, output_to_parse, solution_number, string)
    +
    + +
    +
    +add_solutions_from_components_values(components_values, memory, model_type, solutions, solve_time, solver_name, solver_output)
    +
    + +
    +
    +build_deterministic_truncated_xor_differential_trail_model(fixed_variables=[], number_of_rounds=None)
    +

    Build the CP model for the search of deterministic truncated XOR differential trails.

    +

    INPUT:

    +
      +
    • fixed_variableslist (default: []); dictionaries containing the variables to be fixed in standard +format

    • +
    • number_of_roundsinteger (default: None); number of rounds

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64), integer_to_bit_list(0, 64, 'little'))]
    +sage: cp.build_deterministic_truncated_xor_differential_trail_model(fixed_variables)
    +
    +
    +
    + +
    +
    +build_impossible_xor_differential_trail_model(fixed_variables=[], number_of_rounds=None, middle_round=1)
    +

Build the CP model for the search of impossible XOR differential trails.

    +

    INPUT:

    +
      +
    • fixed_variableslist (default: []); dictionaries containing the variables to be fixed in standard +format

    • +
• number_of_roundsinteger (default: None); number of rounds

• +
• middle_roundinteger (default: 1); the middle round of the trail

• +
    +

    EXAMPLES:

    +
sage: from claasp.cipher_modules.models.cp.cp_models.cp_impossible_xor_differential_model import CpImpossibleXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
+sage: cp = CpImpossibleXorDifferentialModel(speck)
    +sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64), integer_to_bit_list(0, 64, 'little'))]
    +sage: cp.build_impossible_xor_differential_trail_model(fixed_variables)
    +
    +
    +
    + +
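A hedged sketch of handing the model built in the example above to solve(), assuming the 'impossible_xor_differential' model type listed under solve() further down this page (the call is illustrative, not doctested):

sage: solutions = cp.solve('impossible_xor_differential', solver_name='Chuffed')  # output depends on the solver; may be empty for so few rounds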
    +
    +build_inverse_deterministic_truncated_xor_differential_trail_model(number_of_rounds=None, fixed_variables=[])
    +

    Build CP model for search of deterministic truncated XOR differential trails for the inverted cipher.

    +

    INPUT:

    +
      +
• number_of_roundsinteger (default: None); number of rounds

    • +
    • fixed_variableslist; dictionaries containing the variables to be fixed in standard format

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64), integer_to_bit_list(0, 64, 'little'))]
    +sage: fixed_variables.append(set_fixed_variables('plaintext', 'not_equal', range(32), integer_to_bit_list(0, 32, 'little')))
    +sage: cp.build_inverse_deterministic_truncated_xor_differential_trail_model(2, fixed_variables)
    +
    +
    +
    + +
    +
    +build_mix_column_truncated_table(component)
    +

    Return a model that generates the list of possible input/output couples for the given mix column.

    +

    INPUT:

    +
      +
    • componentComponent object; the mix column component in Cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_model import CpModel
    +sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=3)
    +sage: cp = CpModel(aes)
    +sage: mix_column = aes.component_from(0, 21)
    +sage: cp.build_mix_column_truncated_table(mix_column)
    +'array[0..93, 1..8] of int: mix_column_truncated_table_mix_column_0_21 = array2d(0..93, 1..8, [0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,0,0,1,1,0,1,1,1,0,0,1,1,1,0,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1,1,0,1,0,0,1,1,1,1,0,1,0,1,0,1,1,1,0,1,0,1,1,0,1,1,0,1,0,1,1,1,0,1,0,1,0,1,1,1,1,0,0,1,0,1,1,1,1,1,0,1,1,0,0,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,1,1,0,1,0,1,1,0,1,1,1,0,0,1,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1,0,1,0,1,1,1,0,1,1,0,0,1,1,1,0,1,1,1,0,1,1,1,1,0,0,1,0,1,1,1,1,0,1,0,0,1,1,1,1,0,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,0,0,1,1,0,1,1,1,0,0,1,1,1,0,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,1,1,1,0,1,0,0,1,1,1,1,0,1,0,1,0,1,1,1,0,1,0,1,1,0,1,1,0,1,0,1,1,1,0,1,0,1,0,1,1,1,1,1,0,1,1,0,0,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,1,1,0,1,0,1,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1,0,1,0,1,1,1,0,1,1,1,0,1,1,1,1,0,0,1,0,1,1,1,1,0,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,0,0,1,1,0,1,1,1,0,0,1,1,1,0,1,1,0,0,1,1,1,1,1,1,0,1,0,0,1,1,1,1,0,1,0,1,0,1,1,1,0,1,0,1,1,0,1,1,0,1,0,1,1,1,1,1,0,1,1,0,0,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1,1,1,0,1,1,1,1,0,1,1,0,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,0,0,1,1,0,1,1,1,0,0,1,1,1,1,1,1,0,1,0,0,1,1,1,1,0,1,0,1,0,1,1,1,0,1,0,1,1,1,1,1,0,1,1,0,0,1,1,1,0,1,1,0,1,1,1,1,0,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,0,0,1,1,1,1,1,1,0,1,0,0,1,1,1,1,0,1,0,1,1,1,1,1,0,1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1]);'
    +
    +
    +
    + +
    +
    +calculate_bit_positions(bit_positions, input_length)
    +
    + +
    +
    +calculate_bit_values(bit_values, input_length)
    +
    + +
    +
    +calculate_input_bit_positions(word_index, input_name_1, input_name_2, new_input_bit_positions_1, new_input_bit_positions_2)
    +
    + +
    +
    +property cipher
    +
    + +
    +
    +property cipher_id
    +
    + +
    +
    +extract_incompatibilities_from_output(components_values)
    +
    + +
    +
    +extract_key_schedule()
    +
    + +
    +
    +final_deterministic_truncated_xor_differential_constraints()
    +

    Return a CP constraints list for the cipher outputs and solving indications for single or second step model.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=2)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: cp.final_deterministic_truncated_xor_differential_constraints()[:-1]
    +['solve satisfy;']
    +
    +
    +
    + +
    +
    +final_impossible_constraints(number_of_rounds, middle_round)
    +

    Return a CP constraints list for the cipher outputs and solving indications for single or second step model.

    +

    INPUT:

    +
      +
    • number_of_roundsinteger; number of rounds

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=2)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: cp.final_impossible_constraints(2)[:-2]
    +['solve satisfy;']
    +
    +
    +
    + +
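Note that the signature above also takes middle_round, which the inherited example does not pass; a hedged sketch of the two-argument call on the model documented on this page (the argument values are illustrative assumptions):

sage: from claasp.cipher_modules.models.cp.cp_models.cp_impossible_xor_differential_model import CpImpossibleXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=2)
sage: cp = CpImpossibleXorDifferentialModel(speck)
sage: constraints = cp.final_impossible_constraints(2, 1)  # number_of_rounds=2, middle_round=1 are illustrative values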
    +
    +find_all_deterministic_truncated_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name='Chuffed')
    +

    Return the solution representing a differential trail with any weight.

    +

    INPUT:

    +
      +
    • number_of_roundsinteger; number of rounds

    • +
    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • +
• solver_namestring (default: Chuffed); the name of the solver. Available values are:

      +
        +
      • 'Chuffed'

      • +
      • 'Gecode'

      • +
      • 'COIN-BC'

      • +
      +
    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(
    +....:         component_id='plaintext',
    +....:         constraint_type='not_equal',
    +....:         bit_positions=range(32),
    +....:         bit_values=[0]*32)
    +sage: key = set_fixed_variables(
    +....:         component_id='key',
    +....:         constraint_type='equal',
    +....:         bit_positions=range(64),
    +....:         bit_values=[0]*64)
    +sage: cp.find_all_deterministic_truncated_xor_differential_trail(3, [plaintext,key], 'Chuffed') # random
    +[{'cipher_id': 'speck_p32_k64_o32_r3',
    +  'components_values': {'cipher_output_2_12': {'value': '22222222222222202222222222222222',
    +    'weight': 0},
    +  ...
    +  'memory_megabytes': 0.02,
    +  'model_type': 'deterministic_truncated_xor_differential',
    +  'solver_name': 'Chuffed',
    +  'solving_time_seconds': 0.002,
    +  'total_weight': '0.0'}]
    +
    +
    +
    + +
    +
    +find_all_impossible_xor_differential_trails(number_of_rounds, fixed_values=[], solver_name='Chuffed', middle_round=1)
    +

Return a list of solutions representing the impossible XOR differential trails found.

    +

    INPUT:

    +
      +
    • number_of_roundsinteger; number of rounds

    • +
    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • +
• solver_namestring (default: Chuffed); the name of the solver. Available values are:

      +
        +
      • 'Chuffed'

      • +
      • 'Gecode'

      • +
      • 'COIN-BC'

      • +
      +
    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(
    +....:         component_id='plaintext',
    +....:         constraint_type='not_equal',
    +....:         bit_positions=range(32),
    +....:         bit_values=[0]*32)
    +sage: key = set_fixed_variables(
    +....:         component_id='key',
    +....:         constraint_type='equal',
    +....:         bit_positions=range(64),
    +....:         bit_values=[0]*64)
    +sage: cp.find_all_deterministic_truncated_xor_differential_trail(3, [plaintext,key], 'Chuffed') # random
    +[{'cipher_id': 'speck_p32_k64_o32_r3',
    +  'components_values': {'cipher_output_2_12': {'value': '22222222222222202222222222222222',
    +    'weight': 0},
    +  ...
    +  'memory_megabytes': 0.02,
    +  'model_type': 'deterministic_truncated_xor_differential',
    +  'solver_name': 'Chuffed',
    +  'solving_time_seconds': 0.002,
    +  'total_weight': '0.0'}]
    +
    +
    +
    + +
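The EXAMPLES block above exercises the inherited deterministic-truncated search; a hedged sketch of the method documented in this entry, reusing the speck, plaintext and key objects defined above together with the class from this page (middle_round left at its default; output depends on the solver):

sage: from claasp.cipher_modules.models.cp.cp_models.cp_impossible_xor_differential_model import CpImpossibleXorDifferentialModel
sage: cp_impossible = CpImpossibleXorDifferentialModel(speck)
sage: trails = cp_impossible.find_all_impossible_xor_differential_trails(3, [plaintext, key], 'Chuffed')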
    +
    +find_one_deterministic_truncated_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name='Chuffed')
    +

    Return the solution representing a differential trail with any weight.

    +

    INPUT:

    +
      +
    • number_of_roundsinteger (default: None); number of rounds

    • +
    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • +
    • solver_namestring (default: Chuffed); the name of the solver. Available values are:

      +
        +
      • 'Chuffed'

      • +
      • 'Gecode'

      • +
      • 'COIN-BC'

      • +
      +
    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=1)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(
    +....:         component_id='plaintext',
    +....:         constraint_type='not_equal',
    +....:         bit_positions=range(32),
    +....:         bit_values=[0]*32)
    +sage: key = set_fixed_variables(
    +....:         component_id='key',
    +....:         constraint_type='equal',
    +....:         bit_positions=range(64),
    +....:         bit_values=[0]*64)
    +sage: cp.find_one_deterministic_truncated_xor_differential_trail(1, [plaintext,key], 'Chuffed') # random
    +[{'cipher_id': 'speck_p32_k64_o32_r1',
    +  'components_values': {'cipher_output_0_6': {'value': '22222222222222212222222222222220',
    +    'weight': 0},
    +   'intermediate_output_0_5': {'value': '0000000000000000', 'weight': 0},
    +   'key': {'value': '0000000000000000000000000000000000000000000000000000000000000000',
    +    'weight': 0},
    +   'modadd_0_1': {'value': '2222222222222221', 'weight': 0},
    +   'plaintext': {'value': '11111111011111111111111111111111', 'weight': 0},
    +   'rot_0_0': {'value': '1111111111111110', 'weight': 0},
    +   'rot_0_3': {'value': '1111111111111111', 'weight': 0},
    +   'xor_0_2': {'value': '2222222222222221', 'weight': 0},
    +   'xor_0_4': {'value': '2222222222222220', 'weight': 0}},
    +  'memory_megabytes': 0.01,
    +  'model_type': 'deterministic_truncated_xor_differential_one_solution',
    +  'solver_name': 'Chuffed',
    +  'solving_time_seconds': 0.0,
    +  'total_weight': '0.0'}]
    +
    +
    +
    + +
    +
    +find_one_impossible_xor_differential_trail(number_of_rounds=None, fixed_values=[], solver_name='Chuffed', middle_round=1)
    +

Return the solution representing an impossible XOR differential trail.

    +

    INPUT:

    +
      +
    • number_of_roundsinteger (default: None); number of rounds

    • +
    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • +
    • solver_namestring (default: Chuffed); the name of the solver. Available values are:

      +
        +
      • 'Chuffed'

      • +
      • 'Gecode'

      • +
      • 'COIN-BC'

      • +
      +
    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=1)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(
    +....:         component_id='plaintext',
    +....:         constraint_type='not_equal',
    +....:         bit_positions=range(32),
    +....:         bit_values=[0]*32)
    +sage: key = set_fixed_variables(
    +....:         component_id='key',
    +....:         constraint_type='equal',
    +....:         bit_positions=range(64),
    +....:         bit_values=[0]*64)
    +sage: cp.find_one_deterministic_truncated_xor_differential_trail(1, [plaintext,key], 'Chuffed') # random
    +[{'cipher_id': 'speck_p32_k64_o32_r1',
    +  'components_values': {'cipher_output_0_6': {'value': '22222222222222212222222222222220',
    +    'weight': 0},
    +   'intermediate_output_0_5': {'value': '0000000000000000', 'weight': 0},
    +   'key': {'value': '0000000000000000000000000000000000000000000000000000000000000000',
    +   'weight': 0},
    +   'modadd_0_1': {'value': '2222222222222221', 'weight': 0},
    +   'plaintext': {'value': '11111111011111111111111111111111', 'weight': 0},
    +   'rot_0_0': {'value': '1111111111111110', 'weight': 0},
    +   'rot_0_3': {'value': '1111111111111111', 'weight': 0},
    +   'xor_0_2': {'value': '2222222222222221', 'weight': 0},
    +   'xor_0_4': {'value': '2222222222222220', 'weight': 0}},
    +  'memory_megabytes': 0.01,
    +  'model_type': 'deterministic_truncated_xor_differential_one_solution',
    +  'solver_name': 'Chuffed',
    +  'solving_time_seconds': 0.0,
    +  'total_weight': '0.0'}]
    +
    +
    +
    + +
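As above, the EXAMPLES block shows the inherited deterministic-truncated call; a hedged sketch of the method documented here, reusing the objects defined above (a single round is unlikely to yield an incompatibility, so the values are purely illustrative):

sage: from claasp.cipher_modules.models.cp.cp_models.cp_impossible_xor_differential_model import CpImpossibleXorDifferentialModel
sage: cp_impossible = CpImpossibleXorDifferentialModel(speck)
sage: trail = cp_impossible.find_one_impossible_xor_differential_trail(1, [plaintext, key], 'Chuffed', middle_round=1)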
    +
    +find_possible_number_of_active_sboxes(weight)
    +

Return the set of possible numbers of active S-boxes.

    +

    INPUT:

    +
      +
    • weightinteger; the fixed weight that must be able to be obtained with the found numbers of active S-boxes

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_model import CpModel
    +sage: midori = MidoriBlockCipher()
    +sage: cp = CpModel(midori)
    +sage: model = cp.find_possible_number_of_active_sboxes(9)
    +sage: model
    +{3, 4}
    +
    +
    +
    + +
    +
    +fix_variables_value_constraints(fixed_variables=[], step='full_model')
    +

    Return a list of CP constraints that fix the input variables to a specific value.

    +

    INPUT:

    +
      +
    • fixed_variableslist (default: []); dictionaries containing name, bit_size, +value (as integer) for the variables that need to be fixed to a certain value:

      +
      +
      {

      ‘component_id’: ‘plaintext’,

      +

      ‘constraint_type’: ‘equal’/’not_equal’

      +

      ‘bit_size’: 32,

      +

      ‘value’: 753

      +
      +
      +

      }

      +
    • +
    • stepstring (default: full_model)

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_model import CpModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    +sage: cp = CpModel(speck)
    +sage: cp.fix_variables_value_constraints([set_fixed_variables('plaintext', 'equal', range(4), integer_to_bit_list(5, 4, 'big'))])
    +['constraint plaintext[0] = 0 /\\ plaintext[1] = 1 /\\ plaintext[2] = 0 /\\ plaintext[3] = 1;']
    +sage: cp.fix_variables_value_constraints([set_fixed_variables('plaintext', 'not_equal', list(range(4)), integer_to_bit_list(5, 4, 'big'))])
    +['constraint plaintext[0] != 0 \\/ plaintext[1] != 1 \\/ plaintext[2] != 0 \\/ plaintext[3] != 1;']
    +
    +
    +
    + +
    +
    +property float_and_lat_values
    +
    + +
    +
    +format_component_value(component_id, string)
    +
    + +
    +
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    +
    + +
    +
    +get_mix_column_all_inputs(input_bit_positions_1, input_id_link_1, numb_of_inp_1)
    +
    + +
    +
    +get_total_weight(string_total_weight)
    +
    + +
    +
    +initialise_model()
    +
    + +
    +
    +input_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of CP constraints for the inputs of the cipher for the first step model.

    +

    INPUT:

    +
      +
• None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: aes = AESBlockCipher()
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(aes)
+sage: cp.input_deterministic_truncated_xor_differential_constraints()
    +(['array[0..127] of var 0..2: key;',
    +  'array[0..127] of var 0..2: plaintext;',
    +   ...
    +  'constraint count(plaintext,2) = 0;'])
    +
    +
    +
    + +
    +
    +input_impossible_xor_differential_constraints(number_of_rounds=None, middle_round=None)
    +
    + +
    +
    +input_wordwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of CP constraints for the inputs of the cipher for truncated deterministic xor differential model.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: aes = AESBlockCipher(number_of_rounds = 2)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(aes)
    +sage: cp.input_wordwise_deterministic_truncated_xor_differential_constraints()
    +(['array[0..15] of var 0..3: key_active;',
    +  'array[0..15] of var -2..255: key_value;',
    +   ...
    +  'array[0..15] of var -2..255: cipher_output_1_32_value;'],
    + ['constraint if key_active[0] == 0 then key_value[0] = 0 elseif key_active[0] == 1 then key_value[0] > 0 elseif key_active[0] == 2 then key_value[0] =-1 else key_value[0] =-2 endif;',
    +   ...
    +  'constraint if cipher_output_1_32_active[15] == 0 then cipher_output_1_32_value[15] = 0 elseif cipher_output_1_32_active[15] == 1 then cipher_output_1_32_value[15] > 0 elseif cipher_output_1_32_active[15] == 2 then cipher_output_1_32_value[15] =-1 else cipher_output_1_32_value[15] =-2 endif;',
    +  'constraint count(cipher_output_1_32_active,2) < 128;',
    +  'constraint count(plaintext,1) > 0;',
    +  'constraint count(plaintext,2) = 0;'])
    +
    +
    +
    + +
    +
    +property model_constraints
    +

    Return the model specified by model_type.

    +

    INPUT:

    +
      +
    • model_typestring; the model to retrieve

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_model import CpModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=4)
    +sage: cp = CpModel(speck)
+sage: cp.model_constraints
    +Traceback (most recent call last):
    +...
    +ValueError: No model generated
    +
    +
    +
    + +
    +
    +output_constraints(component)
    +

    Return lists of declarations and constraints for CP output component (both intermediate and cipher).

    +

    INPUT:

    +
      +
    • componentComponent object; the output component (intermediate or cipher) in Cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: output_component = speck.component_from(0, 5)
    +sage: cp.output_constraints(output_component)
    +([],
    + ['constraint intermediate_output_0_5[0] = key[48];',
    + ...
    +  'constraint intermediate_output_0_5[15] = key[63];'])
    +
    +
    +
    + +
    +
    +output_inverse_constraints(component)
    +

    Return lists of declarations and constraints for CP output component (both intermediate and cipher).

    +

    INPUT:

    +
      +
    • componentComponent object; the output component (intermediate or cipher)

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_deterministic_truncated_xor_differential_model import CpDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpDeterministicTruncatedXorDifferentialModel(speck)
    +sage: output_component = speck.component_from(0, 5)
    +sage: cp.output_inverse_constraints(output_component)
    +([],
    + ['constraint intermediate_output_0_5_inverse[0] = key[48];',
    +   ...
    +  'constraint intermediate_output_0_5_inverse[15] = key[63];'])
    +
    +
    +
    + +
    +
    +parse_solver_information(output_to_parse)
    +
    + +
    +
    +set_component_solution_value(component_solution, truncated, value)
    +
    + +
    +
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)
    +

    Return the solution of the model.

    +

    INPUT:

    +
      +
    • model_typestring; the model to solve:

      +
        +
      • ‘cipher’

      • +
      • ‘xor_differential’

      • +
      • ‘xor_differential_one_solution’

      • +
      • ‘xor_linear’

      • +
      • ‘xor_linear_one_solution’

      • +
      • ‘deterministic_truncated_xor_differential’

      • +
      • ‘deterministic_truncated_xor_differential_one_solution’

      • +
      • ‘impossible_xor_differential’

      • +
      +
    • +
• solver_namestring (default: Chuffed); the name of the solver. Available values are:

      +
        +
      • 'Chuffed'

      • +
      • 'Gecode'

      • +
      • 'COIN-BC'

      • +
      +
    • +
    • num_of_processorsinteger; the number of processors to be used

    • +
    • timelimitinteger; time limit to output a result

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    +sage: cp = CpXorDifferentialTrailSearchModel(speck)
    +sage: fixed_variables = [set_fixed_variables('key', 'equal', list(range(64)), integer_to_bit_list(0, 64, 'little')), set_fixed_variables('plaintext', 'not_equal', list(range(32)), integer_to_bit_list(0, 32, 'little'))]
    +sage: cp.build_xor_differential_trail_model(-1, fixed_variables)
    +sage: cp.solve('xor_differential', 'Chuffed') # random
    +[{'cipher_id': 'speck_p32_k64_o32_r4',
    +  ...
    +  'total_weight': '7'},
    + {'cipher_id': 'speck_p32_k64_o32_r4',
    +   ...
    +  'total_weight': '5'}]
    +
    +
    +
    + +
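The num_of_processors and timelimit arguments described above are not exercised in the example; a hedged sketch of how they would be passed to the same solve() call (the values, and the unit of timelimit, are illustrative assumptions):

sage: cp.solve('xor_differential', solver_name='Chuffed', num_of_processors=4, timelimit=60) # random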
    +
    +solver_names(verbose=False)
    +
    + +
    +
    +weight_constraints(weight)
    +

    Return a list of CP constraints that fix the total weight to a specific value.

    +

    INPUT:

    +
      +
    • weightinteger; a specific weight. If set to non-negative integer, fixes the XOR trail weight

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.cp.cp_model import CpModel
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    +sage: cp = CpModel(speck)
    +sage: cp.weight_constraints(10)
    +(['constraint weight = 1000;'], [])
    +
    +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
+
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_model.html
index 617fb008..416d4918 100644
--- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_model.html
+++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_model.html
@@ -1,23 +1,24 @@
-Cp xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Cp xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp xor differential model

    +

    Cp xor differential model

    class CpXorDifferentialModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_model.CpModel

    +

    Bases: CpModel

    add_solution_to_components_values(component_id, component_solution, components_values, j, output_to_parse, solution_number, string)
    @@ -103,16 +104,10 @@

    Navigation

    standard format

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),
    -....: integer_to_bit_list(0, 64, 'little'))]
    -sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),
    -....: integer_to_bit_list(0, 32, 'little')))
    +sage: cp = CpXorDifferentialModel(speck)
-sage: cp.build_xor_differential_trail_model(-1, fixed_variables)
+sage: cp.build_xor_differential_trail_model(-1)
     
    @@ -157,17 +152,11 @@

    Navigation

  • weightinteger; a specific weight. If set to non-negative integer, fixes the XOR trail weight

  • EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),
    -....: integer_to_bit_list(0, 64, 'little'))]
    -sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),
    -....: integer_to_bit_list(0, 32, 'little')))
    -sage: cp.build_xor_differential_trail_model(-1, fixed_variables)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: cp.build_xor_differential_trail_model(-1)
     sage: cp.final_xor_differential_constraints(-1)[:-1]
     ['solve:: int_search(p, smallest, indomain_min, complete) minimize weight;']
     
    @@ -177,7 +166,8 @@

    Navigation

    find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Chuffed')
    -

    Return a list of solutions containing all the differential trails having the fixed_weight weight.

    +

Return a list of solutions containing all the differential trails whose weight equals fixed_weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • @@ -191,21 +181,25 @@

      Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    -sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_values = []
    -sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(16)),
    -....: integer_to_bit_list(0, 16, 'big')))
    -sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(8)),
    -....: integer_to_bit_list(0, 8, 'big')))
    -sage: trails = cp.find_all_xor_differential_trails_with_fixed_weight(1, fixed_values, 'Chuffed') # long
    -...
    -sage: len(trails) # long
    -6
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: trails = cp.find_all_xor_differential_trails_with_fixed_weight(9, solver_name='Chuffed')
    +sage: len(trails)
    +2
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
+sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trails = cp.find_all_xor_differential_trails_with_fixed_weight(2, fixed_values=[key], solver_name='Chuffed')
    +sage: len(trails)
    +2
     
    @@ -213,8 +207,9 @@

    Navigation

    find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight=64, fixed_values=[], solver_name='Chuffed')
    -

    Return a list of solutions containing all the differential trails.

    -

    The differential trails having the weight of correlation lying in the interval [min_weight, max_weight].

    +

Return a list of solutions containing all the differential trails. +By default, the search is set in the single-key setting. +Only the trails whose probability weight lies in the interval [min_weight, max_weight] are returned.

    INPUT:

    • min_weightinteger; the weight from which to start the search

    • @@ -229,21 +224,25 @@

      Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    -sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_values = []
    -sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(16)),
    -....: integer_to_bit_list(0, 16, 'big')))
    -sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(8)),
    -....: integer_to_bit_list(0, 8, 'big')))
    -sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(0,1, fixed_values, 'Chuffed')
    -...
    -sage: len(trails) # long
    -7
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(9,10, solver_name='Chuffed')
    +sage: len(trails)
    +28
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(2,3, fixed_values=[key], solver_name='Chuffed') # long
    +sage: len(trails)
    +9
     
    @@ -256,7 +255,8 @@

    Navigation

    find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a differential trail with the lowest weight of correlation.

    +

    Return the solution representing a differential trail with the lowest probability weight. +By default, the search is set in the single-key setting.

    Note

    There could be more than one trail with the lowest weight. In order to find all the lowest weight @@ -274,24 +274,29 @@

    Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_values = []
    -sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(64)),
    -....: integer_to_bit_list(0, 64, 'big')))
    -sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(32)),
    -....: integer_to_bit_list(0, 32, 'big')))
    -sage: cp.find_lowest_weight_xor_differential_trail(fixed_values,'Chuffed') # random
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: cp.find_lowest_weight_xor_differential_trail(solver_name='Chuffed') # random
     {'building_time': 0.007165431976318359,
      'cipher_id': 'speck_p32_k64_o32_r4',
      'components_values': {'cipher_output_4_12': {'value': '850a9520',
      'weight': 0},
       ...
      'total_weight': '9.0'}
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = cp.find_lowest_weight_xor_differential_trail(fixed_values=[key], solver_name='Chuffed')
    +sage: trail['total_weight']
    +'1.0'
     
    @@ -299,7 +304,8 @@

    Navigation

    find_one_xor_differential_trail(fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a differential trail with any weight.

    +

    Return the solution representing a differential trail with any weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • @@ -312,23 +318,26 @@

      Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=2)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=[0]*32)
    -sage: cp.find_one_xor_differential_trail([plaintext], 'Chuffed') # random
    +sage: cp = CpXorDifferentialModel(speck)
+sage: cp.find_one_xor_differential_trail(solver_name='Chuffed') # random
     {'cipher_id': 'speck_p32_k64_o32_r2',
      'model_type': 'xor_differential_one_solution',
       ...
      'cipher_output_1_12': {'value': 'ffff0000', 'weight': 0}},
      'total_weight': '18.0'}
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=2)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = cp.find_one_xor_differential_trail(fixed_values=[key], solver_name='Chuffed') # random
     
    @@ -336,7 +345,8 @@

    Navigation

    find_one_xor_differential_trail_with_fixed_weight(fixed_weight=- 1, fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a differential trail with the weight of correlation equal to fixed_weight.

    +

Return the solution representing a differential trail with probability weight equal to fixed_weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_weightinteger; the value to which the weight is fixed, if non-negative

    • @@ -350,23 +360,25 @@

      Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(number_of_rounds=5)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=[0]*32)
    -sage: cp.find_one_xor_differential_trail_with_fixed_weight(9, [plaintext], 'Chuffed') # random
    -{'cipher_id': 'speck_p32_k64_o32_r5',
    - 'model_type': 'xor_differential_one_solution',
    - ...
    - 'total_weight': '9.0',
    - 'building_time_seconds': 0.0013153553009033203}
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: trail = cp.find_one_xor_differential_trail_with_fixed_weight(3, solver_name='Chuffed') # random
    +sage: trail['total_weight']
    +'3.0'
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trail = cp.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[key], solver_name='Chuffed')
    +sage: trail['total_weight']
    +'3.0'
     
    @@ -436,7 +448,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -469,10 +481,9 @@

    Navigation

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    +sage: cp = CpXorDifferentialModel(speck)
     sage: cp.input_xor_differential_constraints()
     (['array[0..31] of var 0..1: plaintext;',
       'array[0..63] of var 0..1: key;',
    @@ -518,7 +529,7 @@ 

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -541,6 +552,8 @@

      Navigation

    • 'COIN-BC'

    +
  • num_of_processorsinteger; the number of processors to be used

  • +
  • timelimitinteger; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -561,6 +574,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    update_sbox_ddt_valid_probabilities(component, valid_probabilities)
    @@ -596,7 +614,7 @@

    Navigation

  • numaddinteger; the number of addenda

  • EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (
     ....:     and_xor_differential_probability_ddt)
     sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
     sage: simon = SimonBlockCipher()
    @@ -622,13 +640,13 @@ 

    Navigation

    This Page

    @@ -646,7 +664,7 @@

    Quick search

    - +
    @@ -661,10 +679,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -672,7 +690,7 @@

    Navigation

- +
diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model.html
index ae431755..b3ae9788 100644
--- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model.html
+++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_number_of_active_sboxes_model.html
@@ -1,23 +1,24 @@
-Cp xor differential number of active sboxes model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Cp xor differential number of active sboxes model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp xor differential number of active sboxes model

    +

    Cp xor differential number of active sboxes model

    class CpXorDifferentialNumberOfActiveSboxesModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_model.CpModel

    +

    Bases: CpModel

    add_additional_xor_constraints(nmax, repetition)
    @@ -286,7 +287,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -369,7 +370,7 @@

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -392,6 +393,8 @@

      Navigation

    • 'COIN-BC'

    +
  • num_of_processorsinteger; the number of processors to be used

  • +
  • timelimitinteger; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -412,6 +415,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    weight_constraints(weight)
    @@ -485,13 +493,13 @@

    Navigation

    This Page

    @@ -509,7 +517,7 @@

    Quick search

    - +
    @@ -524,10 +532,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -535,7 +543,7 @@

    Navigation

- +
diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model.html
index c4a9158b..3db8ac19 100644
--- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model.html
+++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_fixing_number_of_active_sboxes_model.html
@@ -1,23 +1,24 @@
-Cp xor differential trail search fixing number of active sboxes model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Cp xor differential trail search fixing number of active sboxes model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp xor differential trail search fixing number of active sboxes model

    +

    Cp xor differential trail search fixing number of active sboxes model

    class CpXorDifferentialFixingNumberOfActiveSboxesModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model.CpXorDifferentialModel, claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_number_of_active_sboxes_model.CpXorDifferentialNumberOfActiveSboxesModel

    +

    Bases: CpXorDifferentialModel, CpXorDifferentialNumberOfActiveSboxesModel

    add_additional_xor_constraints(nmax, repetition)
    @@ -158,16 +159,10 @@

    Navigation

    standard format

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),
    -....: integer_to_bit_list(0, 64, 'little'))]
    -sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),
    -....: integer_to_bit_list(0, 32, 'little')))
    +sage: cp = CpXorDifferentialModel(speck)
     sage: cp.build_xor_differential_trail_model(-1, fixed_variables)
     
    @@ -263,17 +258,11 @@

    Navigation

  • weightinteger; a specific weight. If set to non-negative integer, fixes the XOR trail weight

  • EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),
    -....: integer_to_bit_list(0, 64, 'little'))]
    -sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),
    -....: integer_to_bit_list(0, 32, 'little')))
    -sage: cp.build_xor_differential_trail_model(-1, fixed_variables)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: cp.build_xor_differential_trail_model(-1)
     sage: cp.final_xor_differential_constraints(-1)[:-1]
     ['solve:: int_search(p, smallest, indomain_min, complete) minimize weight;']
     
    @@ -345,8 +334,9 @@

    Navigation

    find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight=64, fixed_values=[], solver_name='Chuffed')
    -

    Return a list of solutions containing all the differential trails.

    -

    The differential trails having the weight of correlation lying in the interval [min_weight, max_weight].

    +

    Return a list of solutions containing all the differential trails. +By default, the search is set in the single-key setting. +The differential trails having the weight of correlation lying in the interval [min_weight, max_weight].

    INPUT:

    • min_weightinteger; the weight from which to start the search

    • @@ -361,21 +351,25 @@

      Navigation

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (
    -....:     CpXorDifferentialTrailSearchModel)
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    -sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
    -sage: cp = CpXorDifferentialTrailSearchModel(speck)
    -sage: fixed_values = []
    -sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(16)),
    -....: integer_to_bit_list(0, 16, 'big')))
    -sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(8)),
    -....: integer_to_bit_list(0, 8, 'big')))
    -sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(0,1, fixed_values, 'Chuffed')
    -...
    -sage: len(trails) # long
    -7
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(9,10, solver_name='Chuffed')
    +sage: len(trails)
    +28
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_model import (CpXorDifferentialModel)
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: cp = CpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(2,3, fixed_values=[key], solver_name='Chuffed') # long
    +sage: len(trails)
    +9
     
    @@ -590,7 +584,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -706,7 +700,7 @@

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -729,6 +723,8 @@

      Navigation

    • 'COIN-BC'

    +
• num_of_processors – integer; the number of processors to be used

  • +
• timelimit – integer; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -751,7 +747,7 @@ 

    Navigation

    -solve_full_two_steps_xor_differential_model(model_type='xor_differential_one_solution', weight=- 1, fixed_variables=[], first_step_solver_name=None, second_step_solver_name=None, nmax=2, repetition=1)
    +solve_full_two_steps_xor_differential_model(model_type='xor_differential_one_solution', weight=- 1, fixed_variables=[], first_step_solver_name='Chuffed', second_step_solver_name='Chuffed', nmax=2, repetition=1)

    Return the solution of the model for an SPN cipher.

    INPUT:

      @@ -830,6 +826,11 @@

      Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    transform_first_step_model(attempt, active_sboxes, weight=- 1)
    @@ -919,13 +920,13 @@

    Navigation

    - +
    @@ -958,10 +959,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -969,7 +970,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_linear_model.html b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_linear_model.html index b3c39325..e29aa956 100644 --- a/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_linear_model.html +++ b/docs/build/html/cipher_modules/models/cp/cp_models/cp_xor_linear_model.html @@ -1,23 +1,24 @@ - + - Cp xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Cp xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cp xor linear model

    +

    Cp xor linear model

    class CpXorLinearModel(cipher)
    -

    Bases: claasp.cipher_modules.models.cp.cp_model.CpModel

    +

    Bases: CpModel

    add_solution_to_components_values(component_id, component_solution, components_values, j, output_to_parse, solution_number, string)
    @@ -210,7 +211,8 @@

    Navigation

    find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Chuffed')
    -

    Return a list of solutions containing all the linear trails having the fixed_weight weight of correlation.

    +

    Return a list of solutions containing all the linear trails having the fixed_weight weight of correlation. +By default, the search removes the key schedule, if any.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • @@ -226,14 +228,22 @@

      Navigation

      EXAMPLES:

      sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
       sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
       sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=3)
      -sage: speck = speck.remove_key_schedule()
       sage: cp = CpXorLinearModel(speck)
      -sage: fixed_variables = [set_fixed_variables('plaintext', 'not_equal', list(range(8)), integer_to_bit_list(0, 8, 'little'))]
      -sage: trails = cp.find_all_xor_linear_trails_with_fixed_weight(1, fixed_variables) # long
      -sage: len(trails) # long
      +sage: trails = cp.find_all_xor_linear_trails_with_fixed_weight(1) # long
      +sage: len(trails)
       12
      +
      +# including the key schedule in the model
      +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
      +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
      +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
      +sage: cp = CpXorLinearModel(speck)
      +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
      +sage: trails = cp.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[key])
      +sage: len(trails)
      +8
       
    @@ -241,7 +251,8 @@

    Navigation

    find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight=64, fixed_values=[], solver_name='Chuffed')
    -

    Return a list of solutions containing all the linear trails having the weight of correlation lying in the interval [min_weight, max_weight].

    +

    Return a list of solutions containing all the linear trails having the weight of correlation lying in the interval [min_weight, max_weight]. +By default, the search removes the key schedule, if any.

    INPUT:

    • min_weightinteger; the weight from which to start the search

    • @@ -258,14 +269,22 @@

      Navigation

      EXAMPLES:

      sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
       sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
       sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=3)
      -sage: speck = speck.remove_key_schedule()
       sage: cp = CpXorLinearModel(speck)
      -sage: fixed_variables = [set_fixed_variables('plaintext', 'not_equal', list(range(8)), integer_to_bit_list(0, 8, 'little'))]
      -sage: trails = cp.find_all_xor_linear_trails_with_weight_at_most(0, 1, fixed_variables) # long time
      -sage: len(trails) # long time
      +sage: trails = cp.find_all_xor_linear_trails_with_weight_at_most(0, 1)
      +sage: len(trails)
       13
      +
      +# including the key schedule in the model
      +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
      +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
      +sage: cp = CpXorLinearModel(speck)
      +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
      +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
      +sage: trails = cp.find_all_xor_linear_trails_with_weight_at_most(0, 3, fixed_values=[key])
      +sage: len(trails)
      +73
       
    @@ -273,7 +292,8 @@

    Navigation

    find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a linear trail with the lowest weight of correlation.

    +

    Return the solution representing a linear trail with the lowest weight of correlation. +By default, the search removes the key schedule, if any.

    Note

    There could be more than one trail with the lowest weight. In order to find all the lowest weight @@ -293,19 +313,22 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    -sage: speck = speck.remove_key_schedule()
    +sage: speck = SpeckBlockCipher(number_of_rounds=4)
     sage: cp= CpXorLinearModel(speck)
    -sage: fixed_variables = [set_fixed_variables('plaintext', 'not_equal', list(range(32)), integer_to_bit_list(0, 32, 'little'))]
    -sage: cp.find_lowest_weight_xor_linear_trail(fixed_variables) # random
    -{'building_time': 0.007994651794433594,
    - 'cipher_id': 'speck_p32_k64_o32_r4',
    -  'components_values': {'cipher_output_3_12_o': {'value': '38103010',
    -  'weight': 0},
    -   ...
    -  'total_weight': 3.0
    -  'building_time_seconds': 0.009123563766479492}
    +sage: trail = cp.find_lowest_weight_xor_linear_trail()
    +sage: trail['total_weight']
    +'3.0'
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=4)
    +sage: cp = CpXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = cp.find_lowest_weight_xor_linear_trail(fixed_values=[key])
    +sage: trail['total_weight']
    +'3.0'
     
    @@ -313,7 +336,8 @@

    Navigation

    find_one_xor_linear_trail(fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a linear trail with any weight of correlation.

    +

    Return the solution representing a linear trail with any weight of correlation. +By default, the search removes the key schedule, if any.

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • @@ -328,20 +352,18 @@

      Navigation

      EXAMPLES:

      sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
       sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
       sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
      -sage: speck = speck.remove_key_schedule()
       sage: cp = CpXorLinearModel(speck)
      -sage: fixed_variables = [set_fixed_variables('plaintext', 'not_equal', list(range(32)), integer_to_bit_list(0, 32, 'little'))]
      -sage: cp.find_one_xor_linear_trail(fixed_variables) # random
      -{'cipher_id': 'speck_p32_k64_o32_r4',
      - ...
      - 'memory': '0.0MB',
      - 'components_values': {'plaintext': {'weight': 0, 'value': '0xffff'},
      -  ...
      - 'cipher_output_3_12': {'weight': 0, 'value': '0xffffffff'}},
      - 'total_weight': 16.0
      - 'building_time_seconds': 0.00975656509399414}
      +sage: cp.find_one_xor_linear_trail() # random
      +
      +# including the key schedule in the model
      +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
      +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
      +sage: cp = CpXorLinearModel(speck)
      +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
      +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
      +sage: cp.find_one_xor_linear_trail(fixed_values=[key]) # random
       
    @@ -349,7 +371,8 @@

    Navigation

    find_one_xor_linear_trail_with_fixed_weight(fixed_weight=- 1, fixed_values=[], solver_name='Chuffed')
    -

    Return the solution representing a linear trail with the weight of correlation equal to fixed_weight.

    +

    Return the solution representing a linear trail with the weight of correlation equal to fixed_weight. +By default, the search removes the key schedule, if any.

    INPUT:

    • fixed_weightinteger; the value to which the weight is fixed, if non-negative

    • @@ -365,17 +388,22 @@

      Navigation

      EXAMPLES:

      sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
       sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
       sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
      -sage: speck = speck.remove_key_schedule()
       sage: cp = CpXorLinearModel(speck)
      -sage: fixed_variables = [set_fixed_variables('plaintext', 'not_equal', list(range(32)), integer_to_bit_list(0, 32, 'little'))]
      -sage: cp.find_one_xor_linear_trail_with_fixed_weight(3, fixed_variables) # random
      -{'cipher_id': 'speck_p32_k64_o32_r4',
      - 'model_type': 'xor_linear_one_solution',
      - ...
      - 'total_weight': 3.0,
      - 'building_time_seconds': 0.005683183670043945}
      +sage: trail = cp.find_one_xor_linear_trail_with_fixed_weight(3)
      +sage: trail['total_weight']
      +'3.0'
      +
      +# including the key schedule in the model
      +sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_linear_model import CpXorLinearModel
      +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
      +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
      +sage: cp = CpXorLinearModel(speck)
      +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
      +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
      +sage: trail = cp.find_one_xor_linear_trail_with_fixed_weight(3, fixed_values=[key])
      +sage: trail['total_weight']
      +'3.0'
       
    @@ -476,7 +504,7 @@

    Navigation

    -get_command_for_solver_process(input_file_path, model_type, solver_name)
    +get_command_for_solver_process(input_file_path, model_type, solver_name, num_of_processors, timelimit)
    @@ -560,7 +588,7 @@

    Navigation

    -solve(model_type, solver_name=None)
    +solve(model_type, solver_name='Chuffed', num_of_processors=None, timelimit=None)

    Return the solution of the model.

    INPUT:

      @@ -583,6 +611,8 @@

      Navigation

    • 'COIN-BC'

    +
• num_of_processors – integer; the number of processors to be used

  • +
• timelimit – integer; time limit to output a result

  • EXAMPLES:

    sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import CpXorDifferentialTrailSearchModel
    @@ -603,6 +633,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    update_and_or_lat_valid_probabilities(and_already_added, component, cp_declarations, valid_probabilities)
    @@ -664,13 +699,13 @@

    Navigation

    - +
    @@ -703,10 +738,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -714,7 +749,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/cp/minizinc_utils/usefulfunctions.html b/docs/build/html/cipher_modules/models/cp/minizinc_utils/usefulfunctions.html
new file mode 100644
index 00000000..f54de9c5
+Usefulfunctions — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/cp/solvers.html b/docs/build/html/cipher_modules/models/cp/solvers.html
new file mode 100644
index 00000000..c643414a
+Solvers — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/milp/milp_model.html b/docs/build/html/cipher_modules/models/milp/milp_model.html
index c9d1703b..f91b9db6 100644
-Milp model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Milp model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Milp model

    +

    Milp model

The target of this module is to find different kinds of trails associated with a cryptanalysis technique by using MILP, e.g. the search for XOR differential trails.

The user is asked to use one of the following MILP solvers; some need to be installed and integrated into Sage beforehand. @@ -75,7 +76,7 @@

    Navigation

    The default choice is GLPK.

    -class MilpModel(cipher, n_window_heuristic=None)
    +class MilpModel(cipher, n_window_heuristic=None, verbose=False)

    Bases: object

    Build MILP models for ciphers using Cipher.
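The new verbose flag on the constructor presumably replaces the verbose_print helper removed further down this diff; a hedged construction sketch, where the subclass and cipher are illustrative choices:

sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: # verbose=True is assumed to enable progress printing during model building and solving
sage: milp = MilpXorDifferentialModel(SpeckBlockCipher(number_of_rounds=3), verbose=True)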

    @@ -222,13 +223,19 @@

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

    • weightinteger; the total weight. If negative, no constraints on the weight is added

    • +
    • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
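A hedged sketch of the updated weight_constraints call, based only on the signature shown above; the constructor arguments, the preliminary init call, and the assumption that the method returns a (variables, constraints) pair are illustrative, not a verified run:

sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
sage: simon = SimonBlockCipher(number_of_rounds=3)
sage: milp = MilpXorDifferentialModel(simon)
sage: milp.init_model_in_sage_milp_class()   # assumed to be needed first, as in the impossible-differential examples
sage: # fix the total trail weight to 10, rounded to 2 decimal places
sage: variables, constraints = milp.weight_constraints(10, weight_precision=2)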
    @@ -313,11 +320,6 @@ 

    Navigation

    -
    -
    -verbose_print(*a, **k)
    -
    -
    @@ -329,13 +331,13 @@

    Navigation

    Previous topic

    -

    Utils

    +

    Cms xor linear model

    This Page

    @@ -353,7 +355,7 @@

    Quick search

    - +
    @@ -368,10 +370,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -379,7 +381,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model.html index 0a4ce6b2..1980d35f 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_deterministic_truncated_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Milp bitwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp bitwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Milp bitwise deterministic truncated xor differential model

    +

    Milp bitwise deterministic truncated xor differential model

    -class MilpBitwiseDeterministicTruncatedXorDifferentialModel(cipher, n_window_heuristic=None)
    -

    Bases: claasp.cipher_modules.models.milp.milp_model.MilpModel

    +class MilpBitwiseDeterministicTruncatedXorDifferentialModel(cipher, n_window_heuristic=None, verbose=False) +

    Bases: MilpModel

    add_constraints_to_build_in_sage_milp_class(fixed_variables=[])
    @@ -129,13 +130,14 @@

    Navigation

    -find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK')
    +find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Return the solution representing a differential trail with the lowest number of unknown variables.

    INPUTS:

    • solver_namestr, the solver to call

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
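A hedged sketch of how the new external_solver_name option might be invoked; the module path, cipher parameters, and solver name below are assumptions for illustration, not the library's documented example:

sage: from claasp.cipher_modules.models.milp.milp_models.milp_bitwise_deterministic_truncated_xor_differential_model import MilpBitwiseDeterministicTruncatedXorDifferentialModel  # assumed module path
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: milp = MilpBitwiseDeterministicTruncatedXorDifferentialModel(speck)
sage: # the model is written to a .lp file and solved outside Sagemath (solver name illustrative)
sage: trail = milp.find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(get_single_key_scenario_format_for_fixed_values(speck), external_solver_name='Gurobi')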
    @@ -153,13 +155,14 @@ 

    Navigation

    -find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK')
    +find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Returns one deterministic truncated XOR differential trail.

    INPUTS:

    • solver_namestr, the solver to call

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    @@ -414,6 +417,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    property trunc_binvar
    @@ -421,11 +429,12 @@

    Navigation

    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

    • weightinteger; the total weight. If negative, no constraints on the weight is added

    • +
    • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
    @@ -455,13 +464,13 @@ 

    Navigation

    - +
    @@ -494,10 +503,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -505,7 +514,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model.html index e4c109a3..676e9176 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_bitwise_impossible_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Milp bitwise impossible xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp bitwise impossible xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,20 +57,22 @@

    Navigation

    -

    Milp bitwise impossible xor differential model

    +

    Milp bitwise impossible xor differential model

    -class MilpBitwiseImpossibleXorDifferentialModel(cipher, n_window_heuristic=None)
    -

    Bases: claasp.cipher_modules.models.milp.milp_models.milp_bitwise_deterministic_truncated_xor_differential_model.MilpBitwiseDeterministicTruncatedXorDifferentialModel

    +class MilpBitwiseImpossibleXorDifferentialModel(cipher, n_window_heuristic=None, verbose=False) +

    Bases: MilpBitwiseDeterministicTruncatedXorDifferentialModel

    -add_constraints_to_build_fully_automatic_model_in_sage_milp_class(fixed_variables=[])
    +add_constraints_to_build_fully_automatic_model_in_sage_milp_class(fixed_variables=[], include_all_components=False)

    Take the constraints contained in self._model_constraints and add them to the build-in sage class.

    INPUT:

    • model_typestring; the model to solve

    • fixed_variableslist (default: []); dictionaries containing the variables to be fixed in standard format

    • +
    • include_all_componentsboolean (default: False); when set to True, every component output can be a source +of incompatibility; otherwise, only round outputs are considered

    See also

    @@ -81,7 +84,7 @@

    Navigation

sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: milp = MilpBitwiseImpossibleXorDifferentialModel(speck)
sage: milp.init_model_in_sage_milp_class()
-sage: milp.aadd_constraints_to_build_fully_automatic_model_in_sage_milp_class()
+sage: milp.add_constraints_to_build_fully_automatic_model_in_sage_milp_class()
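A hedged follow-up to the example above, showing the same build with the new flag switched on so that every component output, not only round outputs, may be reported as the source of incompatibility; illustrative only:

sage: milp.add_constraints_to_build_fully_automatic_model_in_sage_milp_class(include_all_components=True)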
    @@ -113,8 +116,8 @@

    Navigation

    -
    -add_constraints_to_build_in_sage_milp_class_with_fixed_components(component_id_list=None, fixed_variables=[])
    +
    +add_constraints_to_build_in_sage_milp_class_with_chosen_incompatible_components(component_id_list=None, fixed_variables=[])

    Take the constraints contained in self._model_constraints and add them to the build-in sage class.

    INPUT:

      @@ -133,7 +136,7 @@

      Navigation

sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: milp = MilpBitwiseImpossibleXorDifferentialModel(speck)
sage: milp.init_model_in_sage_milp_class()
-sage: milp.add_constraints_to_build_in_sage_milp_class_with_fixed_components(["rot_1_4"])
+sage: milp.add_constraints_to_build_in_sage_milp_class_with_chosen_incompatible_components(["rot_1_6"])
    @@ -209,13 +212,14 @@

    Navigation

    -find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK')
    +find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Return the solution representing a differential trail with the lowest number of unknown variables.

    INPUTS:

    • solver_namestr, the solver to call

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
    @@ -233,13 +237,14 @@ 

    Navigation

    -find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK')
    +find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Returns one deterministic truncated XOR differential trail.

    INPUTS:

    • solver_namestr, the solver to call

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    @@ -279,7 +284,7 @@ 

    Navigation

    -find_one_bitwise_impossible_xor_differential_trail(middle_round, fixed_values=[], solver_name='GLPK')
    +find_one_bitwise_impossible_xor_differential_trail(middle_round, fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Returns one bitwise impossible XOR differential trail.

    INPUTS:

      @@ -287,6 +292,7 @@

      Navigation

    • middle_roundinteger; the round number for which the incompatibility occurs

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLES:

    # table 9 from https://eprint.iacr.org/2014/761.pdf
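Separately from the documented table-9 example, a hedged sketch of the call with the new signature above; the module path, cipher, and middle round are illustrative assumptions:

sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
sage: from claasp.cipher_modules.models.milp.milp_models.milp_bitwise_impossible_xor_differential_model import MilpBitwiseImpossibleXorDifferentialModel  # assumed module path
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=6)
sage: milp = MilpBitwiseImpossibleXorDifferentialModel(speck)
sage: # look for a contradiction at the round-3 boundary (round number illustrative)
sage: trail = milp.find_one_bitwise_impossible_xor_differential_trail(3, get_single_key_scenario_format_for_fixed_values(speck))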
    @@ -330,8 +336,8 @@ 

    Navigation

    -
    -find_one_bitwise_impossible_xor_differential_trail_with_fixed_component(component_id_list, fixed_values=[], solver_name='GLPK')
    +
    +find_one_bitwise_impossible_xor_differential_trail_with_chosen_incompatible_components(component_id_list, fixed_values=[], solver_name='GLPK', external_solver_name=None)

    Returns one bitwise impossible XOR differential trail.

    INPUTS:

      @@ -339,6 +345,7 @@

      Navigation

    • component_id_liststr; the list of component ids for which the incompatibility occurs

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLES:

    sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    @@ -349,7 +356,7 @@ 

    Navigation

    sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=[0]*31 + [1]) sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(64), bit_values=[0]*64) sage: ciphertext = set_fixed_variables(component_id='cipher_output_10_13', constraint_type='equal', bit_positions=range(32), bit_values=[0]*6 + [2,0,2] + [0]*23) -sage: trail = milp.find_one_bitwise_impossible_xor_differential_trail_with_fixed_component(['intermediate_output_5_12'], fixed_values=[plaintext, key, ciphertext]) +sage: trail = milp.find_one_bitwise_impossible_xor_differential_trail_with_chosen_incompatible_components(['intermediate_output_5_12'], fixed_values=[plaintext, key, ciphertext]) sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables @@ -363,14 +370,14 @@

    Navigation

    sage: P2 = set_fixed_variables(component_id='intermediate_output_1_71', constraint_type='equal', bit_positions=range(320), bit_values= [2, 2, 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 2, 2, 0, 0, 0, 0, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 2, 2, 0, 0, 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 0, 2, 2, 2, 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0]) sage: P3 = set_fixed_variables(component_id='intermediate_output_2_71', constraint_type='equal', bit_positions=range(320), bit_values= [2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 0, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2]) sage: P5 = set_fixed_variables(component_id='cipher_output_4_71', constraint_type='equal', bit_positions=range(320), bit_values= [0]*192 + [1] + [0]* 127) -sage: trail = milp.find_one_bitwise_impossible_xor_differential_trail_with_fixed_component(["sbox_3_56"], fixed_values=[plaintext, P1, P2, P3, P4, P5]) +sage: trail = milp.find_one_bitwise_impossible_xor_differential_trail_with_chosen_incompatible_components(["sbox_3_56"], fixed_values=[plaintext, P1, P2, P3, P5])
    -find_one_bitwise_impossible_xor_differential_trail_with_fully_automatic_model(fixed_values=[], solver_name='GLPK')
    +find_one_bitwise_impossible_xor_differential_trail_with_fully_automatic_model(fixed_values=[], include_all_components=False, solver_name='GLPK', external_solver_name=None)

    Returns one bitwise impossible XOR differential trail.

    INPUTS:

      @@ -378,6 +385,9 @@

      Navigation

    • middle_roundinteger; the round number for which the incompatibility occurs

    • fixed_valueslist of dict, the variables to be fixed in standard format (see set_fixed_variables())

    • +
    • include_all_componentsboolean (default: False); when set to True, every component output can be a source +of incompatibility; otherwise, only round outputs are considered

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLES:

    # table 9 from https://eprint.iacr.org/2014/761.pdf
    @@ -622,6 +632,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    property trunc_binvar
    @@ -629,11 +644,12 @@

    Navigation

    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

    • weightinteger; the total weight. If negative, no constraints on the weight is added

    • +
    • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
    @@ -663,13 +679,13 @@ 

    Navigation

    - +
    @@ -702,10 +718,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -713,7 +729,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_cipher_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_cipher_model.html index 306cc780..49003451 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_cipher_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_cipher_model.html @@ -1,22 +1,23 @@ - + - Milp cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -33,7 +34,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Milp cipher model

    +

    Milp cipher model

    class MilpCipherModel(cipher, n_window_heuristic=None)
    -

    Bases: claasp.cipher_modules.models.milp.milp_model.MilpModel

    +

    Bases: MilpModel

    property binary_variable
    @@ -231,13 +232,19 @@

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

    • weightinteger; the total weight. If negative, no constraints on the weight is added

    • +
    • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
    @@ -272,8 +279,8 @@ 

    Previous topic

    This Page

    @@ -291,7 +298,7 @@

    Quick search

    - +
    @@ -306,7 +313,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_deterministic_truncated_xor_differential_model.html index a4a7a7cc..e430abea 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_deterministic_truncated_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_deterministic_truncated_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Milp wordwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp wordwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Milp wordwise deterministic truncated xor differential model

    +

    Milp wordwise deterministic truncated xor differential model

    -class MilpWordwiseDeterministicTruncatedXorDifferentialModel(cipher, n_window_heuristic=None)
    -

    Bases: claasp.cipher_modules.models.milp.milp_model.MilpModel

    +class MilpWordwiseDeterministicTruncatedXorDifferentialModel(cipher, n_window_heuristic=None, verbose=False) +

    Bases: MilpModel

    add_constraints_to_build_in_sage_milp_class(fixed_bits=[], fixed_words=[])
    @@ -138,13 +139,14 @@

    Navigation

    -find_lowest_varied_patterns_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK')
    +find_lowest_varied_patterns_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

    Return the solution representing a differential trail with the lowest number of unknown variables.

    INPUTS:

    • solver_namestr, the solver to call

    • fixed_bitslist of dict, the bit variables to be fixed in standard format

    • fixed_wordslist of dict, the word variables to be fixed in standard format

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
    @@ -163,7 +165,7 @@ 

    Navigation

    -find_one_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK')
    +find_one_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

    Returns one deterministic truncated XOR differential trail.

    INPUTS:

      @@ -172,6 +174,7 @@

      Navigation

      standard format (see set_fixed_variables())

    • fixed_wordslist of dict, the word variables to be fixed in standard format (see set_fixed_variables())

    • +
    • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

    EXAMPLE:

    sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    @@ -393,6 +396,11 @@ 

    Navigation

    +
    +
    +solver_names(verbose=False)
    +
    +
    property trunc_wordvar
    @@ -400,11 +408,12 @@

    Navigation

    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

    • weightinteger; the total weight. If negative, no constraints on the weight is added

    • +
    • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
    @@ -439,13 +448,13 @@ 

    Navigation

    Previous topic

    -

    Milp xor linear model

    +

    Solvers

    This Page

    @@ -463,7 +472,7 @@

    Quick search

    - +
    @@ -478,10 +487,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -489,7 +498,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_impossible_xor_differential_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_impossible_xor_differential_model.html index 30ba440a..eecad6d6 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_impossible_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_wordwise_impossible_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Milp wordwise impossible xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp wordwise impossible xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,20 +57,22 @@

    Navigation

    -

    Milp wordwise impossible xor differential model

    +

    Milp wordwise impossible xor differential model

    -class MilpWordwiseImpossibleXorDifferentialModel(cipher, n_window_heuristic=None)
    -

    Bases: claasp.cipher_modules.models.milp.milp_models.milp_wordwise_deterministic_truncated_xor_differential_model.MilpWordwiseDeterministicTruncatedXorDifferentialModel

    +class MilpWordwiseImpossibleXorDifferentialModel(cipher, n_window_heuristic=None, verbose=False) +

    Bases: MilpWordwiseDeterministicTruncatedXorDifferentialModel

    -add_constraints_to_build_fully_automatic_model_in_sage_milp_class(fixed_bits=[], fixed_words=[])
    +add_constraints_to_build_fully_automatic_model_in_sage_milp_class(fixed_bits=[], fixed_words=[], include_all_components=False)

    Take the constraints contained in self._model_constraints and add them to the build-in sage class.

    INPUT:

    • model_typestring; the model to solve

    • fixed_bitslist of dict, the bit variables to be fixed in standard format

    • fixed_wordslist of dict, the word variables to be fixed in standard format

    • +
    • include_all_componentsboolean (default: False); when set to True, every component output can be a source +of incompatibility; otherwise, only round outputs are considered

    See also

    @@ -115,8 +118,8 @@

    Navigation

    -
    -add_constraints_to_build_in_sage_milp_class_with_fixed_components(component_id_list=None, fixed_bits=[], fixed_words=[])
    +
    +add_constraints_to_build_in_sage_milp_class_with_chosen_incompatible_components(component_id_list=None, fixed_bits=[], fixed_words=[])

    Take the constraints contained in self._model_constraints and add them to the build-in sage class.

    INPUT:

      @@ -221,13 +224,14 @@

      Navigation

      -find_lowest_varied_patterns_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK')
      +find_lowest_varied_patterns_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

      Return the solution representing a differential trail with the lowest number of unknown variables.

      INPUTS:

      • solver_namestr, the solver to call

      • fixed_bitslist of dict, the bit variables to be fixed in standard format

      • fixed_wordslist of dict, the word variables to be fixed in standard format

      • +
      • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

      EXAMPLE:

      sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
      @@ -246,7 +250,7 @@ 

      Navigation

      -find_one_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK')
      +find_one_wordwise_deterministic_truncated_xor_differential_trail(fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

      Returns one deterministic truncated XOR differential trail.

      INPUTS:

        @@ -255,6 +259,7 @@

        Navigation

        standard format (see set_fixed_variables())

      • fixed_wordslist of dict, the word variables to be fixed in standard format (see set_fixed_variables())

      • +
      • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

      EXAMPLE:

      sage: from claasp.cipher_modules.models.utils import set_fixed_variables
      @@ -274,7 +279,7 @@ 

      Navigation

      -find_one_wordwise_impossible_xor_differential_trail(middle_round=None, fixed_bits=[], fixed_words=[], solver_name='GLPK')
      +find_one_wordwise_impossible_xor_differential_trail(middle_round=None, fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

      Returns one wordwise impossible XOR differential trail.

      INPUTS:

        @@ -282,6 +287,7 @@

        Navigation

      • middle_roundinteger; the round number for which the incompatibility occurs

      • fixed_bitslist of dict, the bit variables to be fixed in standard format

      • fixed_wordslist of dict, the word variables to be fixed in standard format

      • +
      • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

      EXAMPLE:

      sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
      @@ -295,8 +301,8 @@ 

      Navigation

      -
      -find_one_wordwise_impossible_xor_differential_trail_with_fixed_component(component_id_list, fixed_bits=[], fixed_words=[], solver_name='GLPK')
      +
      +find_one_wordwise_impossible_xor_differential_trail_with_chosen_components(component_id_list, fixed_bits=[], fixed_words=[], solver_name='GLPK', external_solver_name=None)

      Returns one wordwise impossible XOR differential trail.

      INPUTS:

        @@ -304,6 +310,7 @@

        Navigation

      • component_id_liststr; the list of component ids for which the incompatibility occurs

      • fixed_bitslist of dict, the bit variables to be fixed in standard format

      • fixed_wordslist of dict, the word variables to be fixed in standard format

      • +
      • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

      EXAMPLE:

      sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
      @@ -311,20 +318,23 @@ 

      Navigation

sage: aes = AESBlockCipher(number_of_rounds=2)
sage: from claasp.cipher_modules.models.milp.milp_models.milp_wordwise_impossible_xor_differential_model import MilpWordwiseImpossibleXorDifferentialModel
sage: milp = MilpWordwiseImpossibleXorDifferentialModel(aes)
-sage: trail = milp.find_one_wordwise_impossible_xor_differential_trail_with_fixed_component(['mix_column_0_21'], get_single_key_scenario_format_for_fixed_values(aes))
+sage: trail = milp.find_one_wordwise_impossible_xor_differential_trail_with_chosen_components(['mix_column_0_21'], get_single_key_scenario_format_for_fixed_values(aes))
      -find_one_wordwise_impossible_xor_differential_trail_with_fully_automatic_model(fixed_bits=[], fixed_words=[], solver_name='GLPK')
      +find_one_wordwise_impossible_xor_differential_trail_with_fully_automatic_model(fixed_bits=[], fixed_words=[], include_all_components=False, solver_name='GLPK', external_solver_name=None)

      Returns one wordwise impossible XOR differential trail.

      INPUTS:

      • solver_namestr, the solver to call

      • fixed_bitslist of dict, the bit variables to be fixed in standard format

      • fixed_wordslist of dict, the word variables to be fixed in standard format

      • +
      • include_all_componentsboolean (default: False); when set to True, every component output can be a source +of incompatibility; otherwise, only round outputs are considered

      • +
      • external_solver_namestring (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver.

      EXAMPLE:

      sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
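A hedged sketch of the fully automatic wordwise search with the new flag, modelled on the AES example earlier on this page; the AES import path and the way the fixed bits are passed are assumptions for illustration:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher  # assumed module path
sage: from claasp.cipher_modules.models.milp.milp_models.milp_wordwise_impossible_xor_differential_model import MilpWordwiseImpossibleXorDifferentialModel
sage: aes = AESBlockCipher(number_of_rounds=2)
sage: milp = MilpWordwiseImpossibleXorDifferentialModel(aes)
sage: # allow any component output to be the source of incompatibility (illustrative)
sage: trail = milp.find_one_wordwise_impossible_xor_differential_trail_with_fully_automatic_model(fixed_bits=get_single_key_scenario_format_for_fixed_values(aes), include_all_components=True)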
      @@ -541,6 +551,11 @@ 

      Navigation

      +
      +
      +solver_names(verbose=False)
      +
      +
      property trunc_wordvar
      @@ -548,11 +563,12 @@

      Navigation

      -weight_constraints(weight)
      +weight_constraints(weight, weight_precision=2)

      Return a list of variables and a list of constraints that fix the total weight to a specific value.

      INPUT:

      • weightinteger; the total weight. If negative, no constraints on the weight is added

      • +
      • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

      EXAMPLES:

      sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
      @@ -587,8 +603,8 @@ 

      Navigation

      - +
      @@ -629,7 +645,7 @@

      Navigation

      next |
    • - previous |
    • @@ -637,7 +653,7 @@

      Navigation

      - +
    diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_differential_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_differential_model.html index 57b64e42..794125b4 100644 --- a/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Milp xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Milp xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

Milp xor differential model

-class MilpXorDifferentialModel(cipher, n_window_heuristic=None)
+class MilpXorDifferentialModel(cipher, n_window_heuristic=None, verbose=False)

-Bases: claasp.cipher_modules.models.milp.milp_model.MilpModel
+Bases: MilpModel

    -add_constraints_to_build_in_sage_milp_class(weight=- 1, fixed_variables=[])
    +add_constraints_to_build_in_sage_milp_class(weight=- 1, weight_precision=2, fixed_variables=[])

Take the constraints contained in self._model_constraints and add them to the built-in sage class.

    INPUT:

• model_type – string; the model to solve

• weight – integer (default: -1); the total weight. If negative, no constraint on the weight is added

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• fixed_variables – list (default: []); dictionaries containing the variables to be fixed in standard format
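A minimal sketch of the intended call sequence, inferred from the other methods documented on this page (init_model_in_sage_milp_class is listed further below; treat the exact order of calls as an assumption rather than a verified doctest):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: milp = MilpXorDifferentialModel(speck)
sage: milp.init_model_in_sage_milp_class()    # create the internal Sage MILP object
sage: milp.add_constraints_to_build_in_sage_milp_class(weight_precision=3)    # load self._model_constraints into it, rounding weights to 3 decimals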

-find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='GLPK', external_solver_name=None)
+find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return all the XOR differential trails with weight equal to fixed_weight as a list in standard format.
+By default, the search is set in the single-key setting.

    See also

    convert_solver_solution_to_dictionary()

• fixed_weight – integer; the weight found using find_lowest_weight_xor_differential_trail()

• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

-EXAMPLES::
-
-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
-sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
-sage: milp = MilpXorDifferentialModel(speck)
-sage: trails = milp.find_all_xor_differential_trails_with_fixed_weight(1, get_single_key_scenario_format_for_fixed_values(speck)) # long
-...
-sage: len(trails) # long
-6

    EXAMPLES:

    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: trails = milp.find_all_xor_differential_trails_with_fixed_weight(9) # long
    +...
    +sage: len(trails)
    +2
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trails = milp.find_all_xor_differential_trails_with_fixed_weight(2, fixed_values=[key]) # long
    +...
    +sage: len(trails)
    +2
    +
    +
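Each element of trails is a solution dictionary in the standard format used throughout the library; the small follow-up below relies only on the 'total_weight' key that the other examples on this page read back (any further keys are not assumed here):

sage: sorted(set(trail['total_weight'] for trail in trails))   # all returned trails share the requested fixed weight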
-find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='GLPK', external_solver_name=None)
+find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return all XOR differential trails with weight greater than min_weight and lower than or equal to max_weight.
+By default, the search is set in the single-key setting.
The value returned is a list of solutions in standard format.

    See also

    convert_solver_solution_to_dictionary().

• max_weight – integer; the upper bound for the weight

• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

EXAMPLES:

-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
+# single-key setting
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: trails = milp.find_all_xor_differential_trails_with_weight_at_most(9, 10) # long
    +...
    +sage: len(trails)
    +28
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: milp = MilpXorDifferentialModel(speck)
    -sage: trails = milp.find_all_xor_differential_trails_with_weight_at_most(0, 1, get_single_key_scenario_format_for_fixed_values(speck)) # long
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trails = milp.find_all_xor_differential_trails_with_weight_at_most(2, 3, fixed_values=[key]) # long
     ...
    -sage: len(trails) # long
    -7
    +sage: len(trails)
    +9
     
-find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=False)
+find_lowest_weight_xor_differential_trail(fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=False)

Return a XOR differential trail with the lowest weight in standard format, i.e. the solver solution.
+By default, the search is set in the single-key setting.

    See also

    convert_solver_solution_to_dictionary()

• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

    EXAMPLES:

-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
+# single-key setting
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: trail = milp.find_lowest_weight_xor_differential_trail()
    +...
    +sage: trail["total_weight"]
    +9.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: milp = MilpXorDifferentialModel(speck)
    -sage: trail = milp.find_lowest_weight_xor_differential_trail(get_single_key_scenario_format_for_fixed_values(speck))
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trail = milp.find_lowest_weight_xor_differential_trail(fixed_values=[key])
     ...
     sage: trail["total_weight"]
     1.0
-find_one_xor_differential_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)
+find_one_xor_differential_trail(fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return a XOR differential trail, not necessarily the one with the lowest weight.
+By default, the search is set in the single-key setting.

    INPUT:

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the solver to call

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

EXAMPLES:

-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
+# single-key setting
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: milp = MilpXorDifferentialModel(speck)
    -sage: trail = milp.find_one_xor_differential_trail(get_single_key_scenario_format_for_fixed_values(speck)) # random
    +sage: trail = milp.find_one_xor_differential_trail() # random
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trail = milp.find_one_xor_differential_trail(fixed_values=[key]) # random
     
-find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='GLPK', external_solver_name=None)
+find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return one XOR differential trail with weight equal to fixed_weight as a list in standard format.
+By default, the search is set in the single-key setting.

    INPUT:

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the solver to call

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

EXAMPLES:

-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
+# single-key setting
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: milp = MilpXorDifferentialModel(speck)
    +sage: trail = milp.find_one_xor_differential_trail_with_fixed_weight(3) # random
    +sage: trail['total_weight']
    +3.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: milp = MilpXorDifferentialModel(speck)
    -sage: trail = milp.find_one_xor_differential_trail_with_fixed_weight(5, get_single_key_scenario_format_for_fixed_values(speck))
    -...
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: trail = milp.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[key]) # random
    +sage: trail['total_weight']
    +3.0
     
-get_fixed_variables_for_all_xor_differential_trails_with_weight_at_most(fixed_values, inputs_ids, solution)
    init_model_in_sage_milp_class(solver_name='GLPK')
… be fixed

EXAMPLES::

-sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)
sage: milp = MilpXorDifferentialModel(speck)
-sage: milp.is_single_key(get_single_key_scenario_format_for_fixed_values(speck))
+sage: milp.is_single_key(speck)
True

+solver_names(verbose=False)
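solver_names is new in this release and has no description in this extract; the sketch below shows how it might be used, together with the external_solver_name option documented above (the exact output and the available identifiers are assumptions, not values taken from this documentation):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: milp = MilpXorDifferentialModel(SpeckBlockCipher(number_of_rounds=2))
sage: milp.solver_names()               # list the solver identifiers the model accepts
sage: milp.solver_names(verbose=True)   # presumably adds per-solver details
sage: # e.g. milp.find_one_xor_differential_trail(external_solver_name=...) with one of the identifiers listed above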
    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

• weight – integer; the total weight. If negative, no constraint on the weight is added

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
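(This doctest is truncated in the extract. A sketch of the call pattern suggested by the rest of this page follows; the need to call init_model_in_sage_milp_class first, and the (variables, constraints) return shape, are assumptions based on the method description above, not verified output.)

sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_differential_model import MilpXorDifferentialModel
sage: simon = SimonBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: milp = MilpXorDifferentialModel(simon)
sage: milp.init_model_in_sage_milp_class()
sage: variables, constraints = milp.weight_constraints(10, weight_precision=2)   # constrain the total weight to 10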
+property weight_precision
diff --git a/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_linear_model.html b/docs/build/html/cipher_modules/models/milp/milp_models/milp_xor_linear_model.html
index 05954a28..e8189700 100644
-    Milp xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Milp xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Milp xor linear model

-class MilpXorLinearModel(cipher, n_window_heuristic=None)
+class MilpXorLinearModel(cipher, n_window_heuristic=None, verbose=False)

-Bases: claasp.cipher_modules.models.milp.milp_model.MilpModel
+Bases: MilpModel

    -add_constraints_to_build_in_sage_milp_class(weight=- 1, fixed_variables=[])
    +add_constraints_to_build_in_sage_milp_class(weight=- 1, weight_precision=2, fixed_variables=[])

Take the constraints contained in self._model_constraints and add them to the built-in sage class.

    INPUT:

• model_type – string; the model to solve

• weight – integer (default: -1); the total weight. It is the negative base-2 logarithm of the total correlation of the trail. If negative, no constraint on the weight is added

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• fixed_variables – list (default: []); dictionaries containing the variables to be fixed in standard format

    -find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=None, solver_name='GLPK', external_solver_name=None)
    +find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return all the XOR linear trails with weight equal to fixed_weight as a solutions list in standard format.
+By default, the search removes the key schedule, if any.
By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    See also

INPUT:

• fixed_weight – integer; the weight found using find_lowest_weight_xor_linear_trail()

-• fixed_values – list (default: None); each dictionary contains variables values whose output need to be fixed
+• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

    EXAMPLES:

    sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
    -sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=3)
    -sage: milp = MilpXorLinearModel(speck.remove_key_schedule())
    -sage: plaintext = set_fixed_variables(
    -....: component_id='plaintext', constraint_type='not equal',
    -....: bit_positions=range(8), bit_values=integer_to_bit_list(0x0, 8, 'big'))
    -sage: trails = milp.find_all_xor_linear_trails_with_fixed_weight(1, fixed_values = [plaintext])  # long
    +sage: milp = MilpXorLinearModel(speck)
    +sage: trails = milp.find_all_xor_linear_trails_with_fixed_weight(1)
     ...
    -sage: len(trails) # long
    +sage: len(trails)
     12
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: milp = MilpXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = milp.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[key]) # long
    +...
    +sage: len(trails)
    +8
     
-find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='GLPK', external_solver_name=None)
+find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return all XOR linear trails with weight greater than min_weight and lower than or equal to max_weight.
+By default, the search removes the key schedule, if any.

    The value returned is a list of solutions in standard format.

    See also

• max_weight – integer; the upper bound for the weight

• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

EXAMPLES:

    sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
     sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=3)
    -sage: milp = MilpXorLinearModel(speck.remove_key_schedule())
    -sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='not equal', bit_positions=range(8), bit_values=integer_to_bit_list(0x0, 8, 'big'))
    -sage: trails = milp.find_all_xor_linear_trails_with_weight_at_most(0,1,[plaintext]) # long
    +sage: milp = MilpXorLinearModel(speck)
    +sage: trails = milp.find_all_xor_linear_trails_with_weight_at_most(0,1)
     ...
    -sage: len(trails) # long
    +sage: len(trails)
     13
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: milp = MilpXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = milp.find_all_xor_linear_trails_with_weight_at_most(0, 3, fixed_values=[key]) # long
    +...
    +sage: len(trails)
    +73
     
    -find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)
    +find_lowest_weight_xor_linear_trail(fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return a XOR linear trail with the lowest weight in standard format, i.e. the solver solution.
+By default, the search removes the key schedule, if any.
By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    See also

• fixed_values – list (default: []); each dictionary contains variables values whose output need to be fixed

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

    EXAMPLES:

    # To reproduce the trail of Table 6 from https://eprint.iacr.org/2016/407.pdf run:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=9)
-sage: milp = MilpXorLinearModel(speck.remove_key_schedule())
+sage: milp = MilpXorLinearModel(speck)
sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=integer_to_bit_list(0x03805224, 32, 'big'))
sage: trail = milp.find_lowest_weight_xor_linear_trail(fixed_values=[plaintext]) # doctest: +SKIP
...

sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
sage: simon = SimonBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=13)
-sage: milp = MilpXorLinearModel(simon.remove_key_schedule())
+sage: milp = MilpXorLinearModel(simon)
sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=integer_to_bit_list(0x00200000, 32, 'big'))
sage: trail = milp.find_lowest_weight_xor_linear_trail(fixed_values=[plaintext]) # doctest: +SKIP
...
sage: trail["total_weight"] # doctest: +SKIP
18.0

+# including the key schedule in the model
+sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: from claasp.cipher_modules.models.utils import set_fixed_variables
+sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=4)
+sage: milp = MilpXorLinearModel(speck)
+sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
+sage: trail = milp.find_lowest_weight_xor_linear_trail(fixed_values=[key])
+sage: trail["total_weight"]
+3.0
    -find_one_xor_linear_trail(fixed_values=[], solver_name='GLPK', external_solver_name=None)
    +find_one_xor_linear_trail(fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return a XOR linear trail, not necessarily the one with the lowest weight.
+By default, the search removes the key schedule, if any.
By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

• fixed_values – list (default: []); dictionaries containing the variables to be fixed in standard format (see )

-• solver_name – string (default: GLPK); the solver to call

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

+• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

    See also

    EXAMPLES:

    sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: milp = MilpXorLinearModel(speck.remove_key_schedule())
    -sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=integer_to_bit_list(0x03805224, 32, 'big'))
    -sage: trail = milp.find_one_xor_linear_trail(fixed_values=[plaintext]) # random
    +sage: milp = MilpXorLinearModel(speck)
    +sage: trail = milp.find_one_xor_linear_trail() # random
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: milp = MilpXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = milp.find_one_xor_linear_trail(fixed_values=[key]) # random
     
    -find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='GLPK', external_solver_name=None)
    +find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], weight_precision=2, solver_name='GLPK', external_solver_name=None)

Return one XOR linear trail with weight equal to fixed_weight as a list in standard format.
+By default, the search removes the key schedule, if any.
By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

-• solver_name – string (default: GLPK); the solver to call

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

+• solver_name – string (default: GLPK); the name of the solver (if needed)

+• external_solver_name – string (default: None); if specified, the library will write the internal Sagemath MILP model as a .lp file and solve it outside of Sagemath, using the external solver

  • See also

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
     sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: milp = MilpXorLinearModel(speck.remove_key_schedule())
    -sage: trail = milp.find_one_xor_linear_trail_with_fixed_weight(6)
    +sage: milp = MilpXorLinearModel(speck)
    +sage: trail = milp.find_one_xor_linear_trail_with_fixed_weight(6) # random
     ...
    +sage: trail['total_weight']
    +6.0
    +
    +sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: milp = MilpXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trail = milp.find_one_xor_linear_trail_with_fixed_weight(3, fixed_values=[key]) # random
    +sage: trail["total_weight"]
    +3.0
     
-get_fixed_variables_for_all_xor_linear_trails_with_weight_at_most(fixed_values, inputs_ids, solution)
    init_model_in_sage_milp_class(solver_name='GLPK')
+solver_names(verbose=False)
    update_xor_linear_constraints_for_more_than_two_bits(constraints, input_vars, number_of_inputs, output_var, x)
    -weight_constraints(weight)
    +weight_constraints(weight, weight_precision=2)

    Return a list of variables and a list of constraints that fix the total weight to a specific value.

    INPUT:

• weight – integer; the total weight. If negative, no constraint on the weight is added

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
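(Also truncated here. The same call pattern sketched for the XOR differential model's weight_constraints presumably applies, with MilpXorLinearModel in place of MilpXorDifferentialModel; this is an assumption, not a reproduced doctest.)

sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
sage: from claasp.cipher_modules.models.milp.milp_models.milp_xor_linear_model import MilpXorLinearModel
sage: milp = MilpXorLinearModel(SimonBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2))
sage: milp.init_model_in_sage_milp_class()
sage: variables, constraints = milp.weight_constraints(10, weight_precision=2)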
+property weight_precision
    -weight_xor_linear_constraints(weight)
    +weight_xor_linear_constraints(weight, weight_precision)

    Return a list of variables and a list of constraints that fix the total weight to a specific value. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

• weight – integer; the total weight. By default, it is the negative base-2 logarithm of the total correlation of the trail

+• weight_precision – integer (default: 2); the number of decimals to use when rounding the weight of the trail

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher
diff --git a/docs/build/html/cipher_modules/models/milp/solvers.html b/docs/build/html/cipher_modules/models/milp/solvers.html
new file mode 100644
index 00000000..3ffefbb0
+    Solvers — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
+Solvers
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/milp/tmp/tea_cipher_xordiff_model.html b/docs/build/html/cipher_modules/models/milp/tmp/tea_cipher_xordiff_model.html
index 77ec3028..e3252f88 100644
-    Tea cipher xordiff model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Tea cipher xordiff model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_input_pattern_inequalities.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_input_pattern_inequalities.html
index 89ee074b..4eb33ace 100644
-    Dictionary containing truncated input pattern inequalities — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary containing truncated input pattern inequalities — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Next topic
-Milp name mappings
+Utils

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_mds_inequalities.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_mds_inequalities.html
index 91af9001..f5e0174d 100644
-    Dictionary containing truncated mds inequalities — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary containing truncated mds inequalities — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_xor_inequalities_between_n_input_bits.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_truncated_xor_inequalities_between_n_input_bits.html
index 2cec4185..b863157b 100644
-    Dictionary containing truncated xor inequalities between n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary containing truncated xor inequalities between n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_xor_inequalities_between_n_input_bits.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_containing_xor_inequalities_between_n_input_bits.html
index 20c10c9c..cc744de7 100644
-    Dictionary containing xor inequalities between n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary containing xor inequalities between n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes.html
index 2d2e2200..e2a0fc29 100644
-    Dictionary that contains inequalities for large sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary that contains inequalities for large sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes_xor_linear.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_large_sboxes_xor_linear.html
index 1638a789..64d6282f 100644
-    Dictionary that contains inequalities for large sboxes xor linear — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary that contains inequalities for large sboxes xor linear — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_sboxes_with_undisturbed_bits.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_sboxes_with_undisturbed_bits.html
index 6263078c..b2fbf8f7 100644
-    Dictionary that contains inequalities for sboxes with undisturbed bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary that contains inequalities for sboxes with undisturbed bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes.html
index 6825bdb7..c191ceec 100644
-    Dictionary that contains inequalities for small sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary that contains inequalities for small sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes_xor_linear.html b/docs/build/html/cipher_modules/models/milp/utils/dictionary_that_contains_inequalities_for_small_sboxes_xor_linear.html
index 2f602272..079fe666 100644
-    Dictionary that contains inequalities for small sboxes xor linear — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Dictionary that contains inequalities for small sboxes xor linear — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_and_operation_2_input_bits.html b/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_and_operation_2_input_bits.html
index 3e6a1db7..1b326e9e 100644
-    Generate inequalities for and operation 2 input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate inequalities for and operation 2 input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Generate inequalities for and operation 2 input bits

and_LAT()

diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_large_sboxes.html b/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_large_sboxes.html
index 7745ddc1..f4011437 100644
-    Generate inequalities for large sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate inequalities for large sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Generate inequalities for large sboxes

    The logic minimizer espresso is required for this module. It is already installed in the docker.

diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_mds_matrices.html b/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_mds_matrices.html
index 66adb9b2..644f0335 100644
-    Generate inequalities for wordwise truncated mds matrices — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate inequalities for wordwise truncated mds matrices — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Generate inequalities for wordwise truncated mds matrices

    using model 5 from https://tosc.iacr.org/index.php/ToSC/article/view/8702/8294

diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_xor_with_n_input_bits.html b/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_wordwise_truncated_xor_with_n_input_bits.html
index 69dca2db..4b536d82 100644
-    Generate inequalities for wordwise truncated xor with n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate inequalities for wordwise truncated xor with n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Generate inequalities for wordwise truncated xor with n input bits

    delete_dictionary_that_contains_wordwise_truncated_input_inequalities()
diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_xor_with_n_input_bits.html b/docs/build/html/cipher_modules/models/milp/utils/generate_inequalities_for_xor_with_n_input_bits.html
index eff23d7a..e06dbeca 100644
-    Generate inequalities for xor with n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate inequalities for xor with n input bits — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Generate inequalities for xor with n input bits

    delete_dictionary_that_contains_xor_inequalities()
diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_sbox_inequalities_for_trail_search.html b/docs/build/html/cipher_modules/models/milp/utils/generate_sbox_inequalities_for_trail_search.html
index 7510dded..e21adb9b 100644
-    Generate sbox inequalities for trail search — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate sbox inequalities for trail search — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

diff --git a/docs/build/html/cipher_modules/models/milp/utils/generate_undisturbed_bits_inequalities_for_sboxes.html b/docs/build/html/cipher_modules/models/milp/utils/generate_undisturbed_bits_inequalities_for_sboxes.html
index baf58ee4..38d1830d 100644
-    Generate undisturbed bits inequalities for sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Generate undisturbed bits inequalities for sboxes — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Generate undisturbed bits inequalities for sboxes

    +

    Generate undisturbed bits inequalities for sboxes

This module generates the inequalities for the undisturbed bits of sboxes for the truncated xor differential model, by using espresso. It uses the notion of undisturbed differential bits discussed in https://link.springer.com/chapter/10.1007/978-3-031-26553-2_3

The logic minimizer espresso is required for this module. It is already installed in the Docker image.

diff --git a/docs/build/html/cipher_modules/models/milp/utils/milp_name_mappings.html b/docs/build/html/cipher_modules/models/milp/utils/milp_name_mappings.html
index 80fdba10..553db4b3 100644
--- a/docs/build/html/cipher_modules/models/milp/utils/milp_name_mappings.html
+++ b/docs/build/html/cipher_modules/models/milp/utils/milp_name_mappings.html
@@ -1,23 +1,24 @@
- Milp name mappings — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Milp name mappings — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Milp name mappings

    +

    Milp name mappings

diff --git a/docs/build/html/cipher_modules/models/milp/utils/milp_truncated_utils.html b/docs/build/html/cipher_modules/models/milp/utils/milp_truncated_utils.html
new file mode 100644
index 00000000..5870d989
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/milp/utils/milp_truncated_utils.html
@@ -0,0 +1,236 @@
+ Milp truncated utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Milp truncated utils

    +
    +
    +fix_variables_value_deterministic_truncated_xor_differential_constraints(milp_model, model_variables, fixed_variables=[])
    +
    + +
    +
    +generate_all_incompatibility_constraints_for_fully_automatic_model(model, model_type, x, x_class, include_all_components)
    +
    + +
    +
    +generate_incompatiblity_constraints_for_component(model, model_type, x, x_class, backward_component, include_all_components)
    +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/milp/utils/mzn_predicates.html b/docs/build/html/cipher_modules/models/milp/utils/mzn_predicates.html
index 4942702c..d13e2c89 100644
--- a/docs/build/html/cipher_modules/models/milp/utils/mzn_predicates.html
+++ b/docs/build/html/cipher_modules/models/milp/utils/mzn_predicates.html
@@ -1,23 +1,24 @@
- Mzn predicates — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Mzn predicates — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Mzn predicates

    +

    Mzn predicates

    get_word_operations()
diff --git a/docs/build/html/cipher_modules/models/milp/utils/utils.html b/docs/build/html/cipher_modules/models/milp/utils/utils.html
index ad3f46f6..e1a3389c 100644
--- a/docs/build/html/cipher_modules/models/milp/utils/utils.html
+++ b/docs/build/html/cipher_modules/models/milp/utils/utils.html
@@ -1,23 +1,24 @@
- Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Utils

    +

    Utils

    delete_espresso_dictionary(file_path)
    @@ -67,11 +68,6 @@

    Navigation

    espresso_pos_to_constraints(espresso_inequalities, variables)
    -
    -
    -fix_variables_value_deterministic_truncated_xor_differential_constraints(milp_model, model_variables, fixed_variables=[])
    -
    -
    generate_espresso_input(valid_points)
diff --git a/docs/build/html/cipher_modules/models/minizinc/minizinc_model.html b/docs/build/html/cipher_modules/models/minizinc/minizinc_model.html
index bfe8b4ba..85f95be3 100644
--- a/docs/build/html/cipher_modules/models/minizinc/minizinc_model.html
+++ b/docs/build/html/cipher_modules/models/minizinc/minizinc_model.html
@@ -1,23 +1,24 @@
- Minizinc model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Minizinc model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Minizinc model

    +

    Minizinc model

    class MinizincModel(cipher, window_size_list=None, probability_weight_per_round=None, sat_or_milp='sat')
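A minimal instantiation sketch (not part of the original page; it assumes the Speck cipher used throughout these docs):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.minizinc.minizinc_model import MinizincModel
sage: speck = SpeckBlockCipher(number_of_rounds=4)      # toy cipher instance
sage: minizinc = MinizincModel(speck, sat_or_milp='sat')  # base MiniZinc model over that cipher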
    @@ -243,8 +244,8 @@

    Navigation

    Previous topic

    -

    Utils

    +

    Boolean polynomial ring

    Next topic

    @@ -267,7 +268,7 @@

    Quick search

    - +
    @@ -285,7 +286,7 @@

    Navigation

    next |
  • - previous |
  • @@ -293,7 +294,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.html b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.html
new file mode 100644
index 00000000..dcb27a5c
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_boomerang_model.html
@@ -0,0 +1,221 @@
+ Minizinc boomerang model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Minizinc boomerang model

    +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model.html b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model.html index e4804e8a..96f6cd9a 100644 --- a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model.html +++ b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_cipher_model.html @@ -1,23 +1,24 @@ - + - Minizinc cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Minizinc cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Minizinc cipher model

    +

    Minizinc cipher model

    class MinizincCipherModel(cipher, window_size_list=None, probability_weight_per_round=None, sat_or_milp='sat')
    -

    Bases: claasp.cipher_modules.models.minizinc.minizinc_model.MinizincModel

    +

    Bases: MinizincModel

    add_comment(comment)
    @@ -263,13 +264,13 @@

    Navigation

    - +
    @@ -302,10 +303,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -313,7 +314,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model.html index 8bd682f3..54ae67b1 100644 --- a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_deterministic_truncated_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Minizinc deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Minizinc deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Minizinc deterministic truncated xor differential model

    +

    Minizinc deterministic truncated xor differential model

    class MinizincDeterministicTruncatedXorDifferentialModel(cipher, window_size_list=None, probability_weight_per_round=None, sat_or_milp='sat')
    -

    Bases: claasp.cipher_modules.models.minizinc.minizinc_model.MinizincModel

    +

    Bases: MinizincModel

    add_comment(comment)
    @@ -263,13 +264,13 @@

    Navigation

    This Page

    @@ -287,7 +288,7 @@

    Quick search

    - +
    @@ -302,10 +303,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -313,7 +314,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model.html b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model.html index 04958680..cfc11a7b 100644 --- a/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/minizinc/minizinc_models/minizinc_xor_differential_model.html @@ -1,22 +1,23 @@ - + - Minizinc xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Minizinc xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -33,7 +34,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Minizinc xor differential model

    +

    Minizinc xor differential model

    -class MinizincXorDifferentialModel(cipher, window_size_list=None, probability_weight_per_round=None, sat_or_milp='sat')
    -

    Bases: claasp.cipher_modules.models.minizinc.minizinc_model.MinizincModel

    +class MinizincXorDifferentialModel(cipher, window_size_list=None, probability_weight_per_round=None, sat_or_milp='sat', include_word_operations_mzn_file=True) +

    Bases: MinizincModel
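A minimal sketch of the updated constructor with its new keyword argument (illustrative only; the import path is assumed from the documentation layout and the cipher is the Speck instance used in the other examples):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.models.minizinc.minizinc_models.minizinc_xor_differential_model import MinizincXorDifferentialModel
sage: speck = SpeckBlockCipher(number_of_rounds=4)
sage: minizinc = MinizincXorDifferentialModel(speck, include_word_operations_mzn_file=True)  # new keyword from this change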

    add_comment(comment)
    @@ -249,6 +250,16 @@

    Navigation

    constraint_permutation_and_key_schedule_separately_by_input_sizes()
    +
    +
    +extend_model_constraints(constraints)
    +
    + +
    +
    +extend_variables(variables)
    +
    +
    find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name=None)
    @@ -410,6 +421,11 @@

    Navigation

    +
    +
    +get_model_constraints()
    +
    +
    get_probability_vars_from_key_schedule()
    @@ -420,6 +436,11 @@

    Navigation

    get_probability_vars_from_permutation()
    +
    +
    +get_variables()
    +
    +
    init_constraints()
    @@ -588,8 +609,8 @@

    Previous topic

    This Page

    @@ -607,7 +628,7 @@

    Quick search

    - +
    @@ -622,7 +643,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
diff --git a/docs/build/html/cipher_modules/models/minizinc/utils/mzn_bct_predicates.html b/docs/build/html/cipher_modules/models/minizinc/utils/mzn_bct_predicates.html
new file mode 100644
index 00000000..662b3d06
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/minizinc/utils/mzn_bct_predicates.html
@@ -0,0 +1,226 @@
+ Mzn bct predicates — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Mzn bct predicates

    +
    +
    +get_bct_operations()
    +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/minizinc/utils/utils.html b/docs/build/html/cipher_modules/models/minizinc/utils/utils.html
new file mode 100644
index 00000000..841dd99e
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/minizinc/utils/utils.html
@@ -0,0 +1,231 @@
+ Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Utils

    +
    +
    +filter_out_strings_containing_substring(strings_list, substring)
    +
    + +
    +
    +group_strings_by_pattern(list_of_data)
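A sketch of the plausible behaviour of filter_out_strings_containing_substring, inferred from its name only (not the library source):

sage: def filter_out_strings_containing_substring(strings_list, substring):
....:     # keep only the strings that do NOT contain the given substring
....:     return [s for s in strings_list if substring not in s]
sage: filter_out_strings_containing_substring(['p_0', 'key_0', 'p_1'], 'key')
['p_0', 'p_1']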
    +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.html
new file mode 100644
index 00000000..d09a0d4a
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/sat/cms_models/cms_bitwise_deterministic_truncated_xor_differential_model.html
@@ -0,0 +1,509 @@
+ Cms bitwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Cms bitwise deterministic truncated xor differential model

    +
    +

    CMS Deterministic Truncated XOR Differential of a cipher

    +

The target of this class is to override the methods of the superclass Sat Deterministic Truncated Xor Differential Model to take advantage of the handling of XOR clauses in the CryptoMiniSat SAT solver. Therefore, the internal format for SAT CNF clauses follows 4 rules (3 from the superclass + 1):

    +
    +
      +
• every variable is a string with no spaces nor dashes;

• if a literal is a negation of a variable, a dash is prepended to the variable;

• the separator for literals is a space;

• the string 'x ' is prepended to a clause representing an XOR.

    • +
    +
    +

Note that only methods that do not need to introduce new variables to handle XOR operations were overridden.

    +

    For any further information, visit CryptoMiniSat - XOR clauses.
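As an informal illustration of this clause format (hypothetical variable names, not taken from the library):

sage: plain_clause = 'inter_0_0 -inter_0_1'             # inter_0_0 OR (NOT inter_0_1)
sage: xor_clause = 'x plaintext_0_0 key_0_0 inter_0_0'  # the 'x ' prefix marks a CryptoMiniSat XOR clause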

    +
    +
    +class CmsSatDeterministicTruncatedXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    +

    Bases: SatBitwiseDeterministicTruncatedXorDifferentialModel

    +
    +
    +build_bitwise_deterministic_truncated_xor_differential_trail_model(number_of_unknown_variables=None, fixed_variables=[])
    +

    Build the model for the search of deterministic truncated XOR DIFFERENTIAL trails.

    +

    INPUT:

    +
      +
• number_of_unknown_variables – int (default: None); the number of unknown variables that we want to have in the trail

• fixed_variables – list (default: []); the variables to be fixed in standard format

    • +
    +
    +

    See also

    +

    set_fixed_variables()

    +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=22)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: sat.build_bitwise_deterministic_truncated_xor_differential_trail_model()
    +...
    +
    +
    +
    + +
    +
    +calculate_component_weight(component, out_suffix, output_values_dict)
    +
    + +
    +
    +property cipher_id
    +
    + +
    +
    +find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')
    +

    Return the solution representing a differential trail with the lowest number of unknown variables.

    +

    INPUTS:

    +
      +
    • fixed_valueslist of dict, the variables to be fixed in +standard format (see set_fixed_variables())

    • +
    • solver_namestr, the solver to call

    • +
    +

    EXAMPLE:

    +
    sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: S = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: trail = S.find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(get_single_key_scenario_format_for_fixed_values(speck))
    +sage: trail['status']
    +'SATISFIABLE'
    +
    +
    +
    + +
    +
    +find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')
    +

    Returns one deterministic truncated XOR differential trail.

    +

    INPUTS:

    +
      +
    • fixed_valueslist of dict, the variables to be fixed in +standard format (see set_fixed_variables())

    • +
    • solver_namestr, the solver to call

    • +
    +

    EXAMPLE:

    +
    sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: M = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(64), bit_values=[0]*64)
    +sage: trail = M.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key])
    +...
    +
    +sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=3)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: M = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(64), bit_values=[0]*64)
    +sage: out = set_fixed_variables(component_id='cipher_output_2_12', constraint_type='equal', bit_positions=range(32), bit_values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
    +sage: trail = M.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key, out]) # doctest: +SKIP
    +...
    +
    +sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.present_block_cipher import PresentBlockCipher
    +sage: present = PresentBlockCipher(number_of_rounds=1)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(present)
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(80), bit_values=[0]*80)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(64), bit_values=[2,0,0,0] + [1,0,0,1] + [0,0,0,1] + [1,0,0,0] + [0] * 48)
    +sage: trail = sat.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key]) # doctest: +SKIP
    +...
    +
    +
    +
    + +
    +
    +fix_variables_value_constraints(fixed_variables=[])
    +

    Return constraints for fixed variables

    +

Return lists of variables and clauses for fixing variables in the bitwise deterministic truncated XOR differential model.

    +
    +

    See also

    +

    set_fixed_variables()

    +
    +

    INPUT:

    +
      +
    • fixed_variableslist (default: []); variables in default format

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: fixed_variables = [{
    +....:    'component_id': 'plaintext',
    +....:    'constraint_type': 'equal',
    +....:    'bit_positions': [0, 1, 2, 3],
    +....:    'bit_values': [1, 0, 1, 1]
    +....: }, {
    +....:    'component_id': 'ciphertext',
    +....:    'constraint_type': 'not_equal',
    +....:    'bit_positions': [0, 1, 2, 3],
    +....:    'bit_values': [2, 1, 1, 0]
    +....: }]
    +sage: sat.fix_variables_value_constraints(fixed_variables)
    +['-plaintext_0_0',
    + 'plaintext_0_1',
    + '-plaintext_1_0',
    + '-plaintext_1_1',
    + '-plaintext_2_0',
    + 'plaintext_2_1',
    + '-plaintext_3_0',
    + 'plaintext_3_1',
    + '-ciphertext_0_0 ciphertext_1_0 -ciphertext_1_1 ciphertext_2_0 -ciphertext_2_1 ciphertext_3_0 ciphertext_3_1']
    +
    +
    +
    + +
    +
    +property model_constraints
    +

    Return the model specified by model_type.

    +

If the key refers to one of the available models, that model is returned; otherwise, a KeyError exception will be raised.

    +

    INPUT:

    +
      +
    • model_typestring; the model to retrieve

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.sat.sat_model import SatModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=4)
    +sage: sat = SatModel(speck)
    +sage: sat.model_constraints('xor_differential')
    +Traceback (most recent call last):
    +...
    +ValueError: No model generated
    +
    +
    +
    + +
    +
    +property sboxes_ddt_templates
    +
    + +
    +
    +property sboxes_lat_templates
    +
    + +
    +
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)
    +

    Return the solution of the model using the solver_name SAT solver.

    +
    +

    Note

    +

Two types of solvers can be chosen: external or internal. The allowed SAT solvers are listed in the inputs below. Those ending with _sage will not create a subprocess nor additional files and will work completely embedded in Sage. The remaining solvers are allowed, but they need to be installed on the system.

    +
    +

    INPUT:

    +
      +
    • model_typestring; the model for which we want a solution. Available values are:

      +
        +
      • 'cipher'

      • +
      • 'xor_differential'

      • +
      • 'xor_linear'

      • +
      +
    • +
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    +
    +

    See also

    +

    SAT Solvers

    +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_cipher_model import SatCipherModel
    +sage: from claasp.ciphers.block_ciphers.tea_block_cipher import TeaBlockCipher
    +sage: tea = TeaBlockCipher(number_of_rounds=32)
    +sage: sat = SatCipherModel(tea)
    +sage: sat.build_cipher_model()
    +sage: sat.solve('cipher') # random
    +{'cipher_id': 'tea_p64_k128_o64_r32',
    + 'model_type': 'tea_p64_k128_o64_r32',
    + 'solver_name': 'CRYPTOMINISAT_EXT',
    + ...
    +  'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
    +  'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},
    + 'total_weight': 0,
    + 'status': 'SATISFIABLE'}}
    +
    +
    +
    + +
    +
    +weight_constraints(number_of_unknown_variables)
    +

Return lists of variables and constraints that fix the number of unknown variables of the input and the output of the trail to a specific value.

    +

    INPUT:

    +
      +
    • number_of_unknown_variablesint; the number of the unknown variables

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: sat.build_bitwise_deterministic_truncated_xor_differential_trail_model()
    +sage: sat.weight_constraints(4)
    +(['dummy_hw_0_0_0',
    +  'dummy_hw_0_0_1',
    +  'dummy_hw_0_0_2',
    +  ...
    +  '-dummy_hw_0_61_3 dummy_hw_0_62_3',
    +  '-cipher_output_2_12_30_0 -dummy_hw_0_61_3',
    +  '-cipher_output_2_12_31_0 -dummy_hw_0_62_3'])
    +
    +
    +
    + +
    + +
    +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/cipher_modules/models/sat/cms_models/cms_cipher_model.html b/docs/build/html/cipher_modules/models/sat/cms_models/cms_cipher_model.html
index 46957deb..e63607d9 100644
--- a/docs/build/html/cipher_modules/models/sat/cms_models/cms_cipher_model.html
+++ b/docs/build/html/cipher_modules/models/sat/cms_models/cms_cipher_model.html
@@ -1,23 +1,24 @@
- Cms cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Cms cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Cms cipher model

    +

    Cms cipher model

    -

    CMS cipher model of a cipher

    +

    CMS cipher model of a cipher

    The target of this class is to override the methods of the superclass Sat Cipher Model to take the advantage given by the handling of XOR clauses in CryptoMiniSat SAT solver. Therefore, the @@ -79,7 +80,7 @@

    Navigation

    class CmsSatCipherModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    -

    Bases: claasp.cipher_modules.models.sat.sat_models.sat_cipher_model.SatCipherModel

    +

    Bases: SatCipherModel

    build_cipher_model(fixed_variables=[])
    @@ -114,7 +115,7 @@

    Navigation

    -find_missing_bits(fixed_values=[], solver_name='cryptominisat')
    +find_missing_bits(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a generic flow of the cipher from plaintext and key to ciphertext.

    INPUT:

      @@ -139,7 +140,7 @@

      Navigation

      sage: sat.find_missing_bits(fixed_values=[ciphertext]) # random {'cipher_id': 'speck_p32_k64_o32_r22', 'model_type': 'cipher', - 'solver_name': 'cryptominisat', + 'solver_name': 'CRYPTOMINISAT_EXT', ... 'intermediate_output_21_11': {'value': '1411'}, 'cipher_output_21_12': {'value': 'affec7ed'}}, @@ -222,7 +223,7 @@

      Navigation

      -solve(model_type, solver_name='cryptominisat', options=None)
      +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

      Return the solution of the model using the solver_name SAT solver.

      Note

      @@ -255,7 +256,7 @@

      Navigation

      sage: sat.solve('cipher') # random {'cipher_id': 'tea_p64_k128_o64_r32', 'model_type': 'tea_p64_k128_o64_r32', - 'solver_name': 'cryptominisat', + 'solver_name': 'CRYPTOMINISAT_EXT', ... 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}, 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}}, @@ -315,13 +316,13 @@

      Table of Contents

      This Page

      @@ -339,7 +340,7 @@

      Quick search

    - +
    @@ -354,10 +355,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -365,7 +366,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_differential_model.html b/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_differential_model.html
index 878a1ada..7180ee30 100644
--- a/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_differential_model.html
+++ b/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_differential_model.html
@@ -1,23 +1,24 @@
- Cms xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Cms xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Cms xor differential model

    +

    Cms xor differential model

    -

    CMS XOR Differential of a cipher

    +

    CMS XOR Differential of a cipher

    The target of this class is to override the methods of the superclass Sat Xor Differential Model to take the advantage given by the handling of XOR clauses in CryptoMiniSat SAT solver. Therefore, the @@ -78,8 +79,8 @@

    Navigation

    For any further information, visit CryptoMiniSat - XOR clauses.

    -class CmsSatXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False, window_size_by_round=None)
    -

    Bases: claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model.SatXorDifferentialModel

    +class CmsSatXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False) +

    Bases: SatXorDifferentialModel

    build_xor_differential_trail_and_checker_model_at_intermediate_output_level(weight=- 1, fixed_variables=[])
    @@ -142,35 +143,41 @@

    Navigation

    -find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight.

    +find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(9)
    +sage: len(trails) == 2
    +True
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(9, fixed_values=[plaintext, key])
    +....:     bit_values=[0]*64)
    +sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(2, fixed_values=[key])
     sage: len(trails) == 2
     True
     
    @@ -179,8 +186,9 @@

    Navigation

    -find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions.

    +find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions. +By default, the search is set in the single-key setting.

    The list contain all the XOR differential trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -188,30 +196,35 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); they can be created using set_fixed_variables method

  • -
  • solver_namestring (default: cryptominisat); the name of the solver

  • +
  • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

  • See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(9, 10)
    +sage: len(trails) == 28
    +True
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(9, 10, fixed_values=[plaintext, key])
    -sage: len(trails) == 28
    +....:     bit_values=[0]*64)
    +sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(2, 3, fixed_values=[key])
    +sage: len(trails) == 9
     True
     
    @@ -219,8 +232,9 @@

    Navigation

    -find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a trail with the lowest weight.

    +find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a trail with the lowest weight. +By default, the search is set in the single-key setting.

    Note

    There could be more than one trail with the lowest weight. In order to find all the lowest weight trail, @@ -229,105 +243,121 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: trail = sat.find_lowest_weight_xor_differential_trail()
    +sage: trail['total_weight']
    +9.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=(0,)*32)
     sage: key = set_fixed_variables(
     ....:         component_id='key',
    -....:         constraint_type='equal',
    +....:         constraint_type='not_equal',
     ....:         bit_positions=range(64),
     ....:         bit_values=(0,)*64)
    -sage: trail = sat.find_lowest_weight_xor_differential_trail(fixed_values=[plaintext, key])
    +sage: trail = sat.find_lowest_weight_xor_differential_trail(fixed_values=[key])
     sage: trail['total_weight']
    -9.0
    +1.0
     
    -find_one_xor_differential_trail(fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a XOR differential trail.

    -

    The solution probability is almost always lower than the one of a random guess of the longest input.

    +find_one_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a XOR differential trail. +By default, the search is set in the single-key setting. +The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    +sage: sat.find_one_xor_differential_trail() # random
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: key = set_fixed_variables(
    +....:     component_id='key',
     ....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: sat.find_one_xor_differential_trail(fixed_values=[plaintext]) # random
    -{'cipher_id': 'speck_p32_k64_o32_r5',
    - 'model_type': 'xor_differential',
    - 'solver_name': 'cryptominisat',
    - 'solving_time_seconds': 0.0,
    - 'memory_megabytes': 7.09,
    - ...
    - 'status': 'SATISFIABLE',
    - 'building_time_seconds': 0.004874706268310547}
    +....:     bit_positions=range(64),
    +....:     bit_values=[0]*64)
    +sage: result = sat.find_one_xor_differential_trail(fixed_values=[key])
    +sage: result['total_weight'] == 9.0
    +True
     
    -find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight.

    -

    INPUT:

    +find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight. +By default, the search is set in the single-key setting. +INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
    -sage: sat = SatXorDifferentialModel(speck, window_size_by_round=[0, 0, 0])
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=(0,)*32)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: sat.set_window_size_heuristic_by_round([0, 0, 0])
    +sage: trail = sat.find_one_xor_differential_trail_with_fixed_weight(3)
    +sage: trail['total_weight']
    +3.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=(0,)*64)
    -sage: result = sat.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[plaintext, key])
    -sage: result['total_weight']
    +....:     bit_values=[0]*64)
    +sage: trail = sat.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
     3.0
     
    @@ -403,9 +433,19 @@

    Navigation

    property sboxes_lat_templates
    +
    +
    +set_window_size_heuristic_by_component_id(window_size_by_component_id_values, number_of_full_windows=None, full_window_operator='at_least')
    +
    + +
    +
    +set_window_size_heuristic_by_round(window_size_by_round_values, number_of_full_windows=None, full_window_operator='at_least')
    +
    +
    -solve(model_type, solver_name='cryptominisat', options=None)
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

    Return the solution of the model using the solver_name SAT solver.

    Note

    @@ -438,7 +478,7 @@

    Navigation

    sage: sat.solve('cipher') # random {'cipher_id': 'tea_p64_k128_o64_r32', 'model_type': 'tea_p64_k128_o64_r32', - 'solver_name': 'cryptominisat', + 'solver_name': 'CRYPTOMINISAT_EXT', ... 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}, 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}}, @@ -475,8 +515,18 @@

    Navigation

    -
    -property window_size_by_round
    +
    +property window_size_by_component_id_values
    +
    + +
    +
    +property window_size_by_round_values
    +
    + +
    +
    +property window_size_number_of_full_window
    @@ -503,13 +553,13 @@

    Table of Contents

    This Page

    @@ -527,7 +577,7 @@

    Quick search

    - +
    @@ -542,10 +592,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -553,7 +603,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_linear_model.html b/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_linear_model.html
index 2c06d3e0..38825897 100644
--- a/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_linear_model.html
+++ b/docs/build/html/cipher_modules/models/sat/cms_models/cms_xor_linear_model.html
@@ -1,23 +1,24 @@
- Cms xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+ Cms xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Cms xor linear model

    +

    Cms xor linear model

    -

    CMS XOR LINEAR model of a cipher

    +

    CMS XOR LINEAR model of a cipher

    The target of this class is to override the methods of the superclass Sat Xor Linear Model to take the advantage given by the handling of XOR clauses in CryptoMiniSat SAT solver. Therefore, the @@ -79,7 +80,7 @@

    Navigation

    class CmsSatXorLinearModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    -

    Bases: claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model.SatXorLinearModel

    +

    Bases: SatXorLinearModel

    branch_xor_linear_constraints()
    @@ -141,14 +142,15 @@

    Navigation

    -find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    +find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return a list of solutions containing all the XOR linear trails having weight equal to fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -157,16 +159,21 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[plaintext])
    -sage: len(trails) == 2
    +sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(1)
    +sage: len(trails) == 4
    +True
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[key]) # long
    +sage: len(trails) == 8
     True
     
    @@ -174,8 +181,9 @@

    Navigation

    -find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions.

    +find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions. +By default, the search removes the key schedule, if any.

    The list contains all the XOR linear trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -183,7 +191,7 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); can be created using set_fixed_variables method

  • -
  • solver_namestring (default: cryptominisat); the name of the solver

  • +
  • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

  • See also

    @@ -192,16 +200,21 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(2, 3, fixed_values=[plaintext])
    -sage: len(trails) == 11
    +sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(0, 2) # long
    +sage: len(trails) == 187
    +True
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(0, 3, fixed_values=[key]) # long
    +sage: len(trails) == 73
     True
     
    @@ -209,8 +222,9 @@

    Navigation

    -find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='cryptominisat')
    +find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR LINEAR trail with the lowest possible weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    Note

    @@ -220,7 +234,7 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -229,31 +243,37 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trail = sat.find_lowest_weight_xor_linear_trail(fixed_values=[plaintext])
    +sage: trail = sat.find_lowest_weight_xor_linear_trail()
     sage: trail['total_weight']
    -2.0
    +1.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = sat.find_lowest_weight_xor_linear_trail(fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    -find_one_xor_linear_trail(fixed_values=[], solver_name='cryptominisat')
    +find_one_xor_linear_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR linear trail. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -262,15 +282,9 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=4)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: sat.find_one_xor_linear_trail(fixed_values=[plaintext]) # random
    +sage: sat.find_one_xor_linear_trail() # random
     {'cipher_id': 'speck_p32_k64_o32_r4',
      'model_type': 'xor_linear',
      'solver_name': 'cryptominisat',
    @@ -279,20 +293,30 @@ 

    Navigation

... 'status': 'SATISFIABLE', 'building_time_seconds': 0.010079622268676758}
+
+# including the key schedule in the model
+sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: from claasp.cipher_modules.models.utils import set_fixed_variables
+sage: speck = SpeckBlockCipher(number_of_rounds=4)
+sage: sat = SatXorLinearModel(speck)
+sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
+sage: sat.find_one_xor_linear_trail(fixed_values=[key]) # random
    -find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    +find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR linear trail whose weight is fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -300,18 +324,23 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=(0,)*32)
    -sage: result = sat.find_one_xor_linear_trail_with_fixed_weight(7, fixed_values=[plaintext])
    -sage: result['total_weight']
    +sage: trail = sat.find_one_xor_linear_trail_with_fixed_weight(7)
    +sage: trail['total_weight']
     7.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trail = sat.find_one_xor_linear_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    @@ -426,7 +455,7 @@

    Navigation

    -solve(model_type, solver_name='cryptominisat', options=None)
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

    Return the solution of the model using the solver_name SAT solver.

    Note

    @@ -459,7 +488,7 @@

    Navigation

sage: sat.solve('cipher') # random
{'cipher_id': 'tea_p64_k128_o64_r32',
 'model_type': 'tea_p64_k128_o64_r32',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},

@@ -524,13 +553,13 @@

    Table of Contents

    Previous topic

    -

    Sat cipher model

    +

    Cms cipher model

    This Page

    @@ -548,7 +577,7 @@

    Quick search

    @@ -563,10 +592,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -574,7 +603,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/sat_model.html b/docs/build/html/cipher_modules/models/sat/sat_model.html
index 7e0a4baa..f0db59f4 100644
--- a/docs/build/html/cipher_modules/models/sat/sat_model.html
+++ b/docs/build/html/cipher_modules/models/sat/sat_model.html
@@ -1,23 +1,24 @@
-    Sat model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sat model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,9 +57,9 @@

    Navigation

    -

    Sat model

    +

    Sat model

    -

    SAT standard of Cipher

    +

    SAT standard of Cipher

    The target of this class is to build, solve and retrieve the solution of a SAT CNF representing some attacks on ciphers, e.g. the generic cipher inversion or the search for XOR differential trails (for SMT CNFs see the correspondent @@ -75,64 +76,11 @@

    Navigation

    find_lowest_weight_xor_differential_trail, …).

    -

    SAT Solvers

    -

    This module is able to use different SAT solvers. They can be divided in two -categories: external and internal. All over the module, solver_name -variable can be replaced with a value in the following.

    -

    External SAT solvers need to be installed in the system as they are called -using a subprocess. They and corresponding values for solver_name variable -are:

    -
    -
SAT solver        value
CaDiCal           'cadical'
CryptoMiniSat     'cryptominisat'
Glucose           'glucose'
Glucose-syrup     'glucose-syrup'
Kissat            'kissat'
MathSAT           'mathsat'
Minisat           'minisat'
Yices-sat         'yices-sat'
    -
    -

    Internal SAT solvers should be installed by default. To call them, use the -following values:

    -
    -
• 'cryptominisat_sage'
• 'glucose_sage'
• 'glucose-syrup_sage'
• 'LP_sage'
• 'picosat_sage'
    -
    -

    For any further information on internal SAT solvers, visit Abstract SAT solver.

    +

    SAT Solvers

    +

    This module is able to use many different SAT solvers.

    +

    For any further information, refer to the file +claasp.cipher_modules.models.sat.solvers.py and to the section +Available SAT solvers.

REMARK: to be compliant with the library, the Most Significant Bit (MSB) is indexed by 0. Keep this in mind whenever inspecting the code or a CNF.
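To make the MSB-first convention concrete, here is a minimal sketch (an editorial illustration, not part of the original page) of fixing a component with set_fixed_variables; position 0 in bit_positions addresses the most significant bit:

sage: from claasp.cipher_modules.models.utils import set_fixed_variables
sage: # bit_values are listed MSB first, so this fixes the 32-bit difference 0x80000000
sage: plaintext = set_fixed_variables(component_id='plaintext',
....:                                 constraint_type='equal',
....:                                 bit_positions=range(32),
....:                                 bit_values=[1] + [0] * 31)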

    @@ -222,7 +170,7 @@

    Navigation

    -solve(model_type, solver_name='cryptominisat', options=None)
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

    Return the solution of the model using the solver_name SAT solver.

    Note

    @@ -255,7 +203,7 @@

    Navigation

sage: sat.solve('cipher') # random
{'cipher_id': 'tea_p64_k128_o64_r32',
 'model_type': 'tea_p64_k128_o64_r32',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},

@@ -316,13 +264,13 @@

    Table of Contents

    Previous topic

    -

    Minizinc cipher model

    +

    Solvers

    This Page

    @@ -340,7 +288,7 @@

    Quick search

    @@ -355,10 +303,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -366,7 +314,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.html
new file mode 100644
index 00000000..608dc7b9
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/sat/sat_models/sat_bitwise_deterministic_truncated_xor_differential_model.html
@@ -0,0 +1,479 @@
+    Sat bitwise deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Sat bitwise deterministic truncated xor differential model

    +
    +
    +class SatBitwiseDeterministicTruncatedXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    +

    Bases: SatModel

    +
    +
    +build_bitwise_deterministic_truncated_xor_differential_trail_model(number_of_unknown_variables=None, fixed_variables=[])
    +

    Build the model for the search of deterministic truncated XOR DIFFERENTIAL trails.

    +

    INPUT:

    +
      +
    • number_of_unknown_variablesint (default: None); the number +of unknown variables that we want to have in the trail

    • +
    • fixed_variableslist (default: []); the variables to be +fixed in standard format

    • +
    +
    +

    See also

    +

    set_fixed_variables()

    +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=22)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: sat.build_bitwise_deterministic_truncated_xor_differential_trail_model()
    +...
    +
    +
    +
    + +
    +
    +calculate_component_weight(component, out_suffix, output_values_dict)
    +
    + +
    +
    +property cipher_id
    +
    + +
    +
    +find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')
    +

    Return the solution representing a differential trail with the lowest number of unknown variables.

    +

    INPUTS:

    +
      +
    • fixed_valueslist of dict, the variables to be fixed in +standard format (see set_fixed_variables())

    • +
    • solver_namestr, the solver to call

    • +
    +

    EXAMPLE:

    +
    sage: from claasp.cipher_modules.models.utils import get_single_key_scenario_format_for_fixed_values
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: S = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: trail = S.find_lowest_varied_patterns_bitwise_deterministic_truncated_xor_differential_trail(get_single_key_scenario_format_for_fixed_values(speck))
    +sage: trail['status']
    +'SATISFIABLE'
    +
    +
    +
    + +
    +
    +find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')
    +

    Returns one deterministic truncated XOR differential trail.

    +

    INPUTS:

    +
      +
    • fixed_valueslist of dict, the variables to be fixed in +standard format (see set_fixed_variables())

    • +
    • solver_namestr, the solver to call

    • +
    +

    EXAMPLE:

    +
    sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: M = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(64), bit_values=[0]*64)
    +sage: trail = M.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key])
    +...
    +
    +sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=3)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: M = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(32), bit_values=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(64), bit_values=[0]*64)
    +sage: out = set_fixed_variables(component_id='cipher_output_2_12', constraint_type='equal', bit_positions=range(32), bit_values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
    +sage: trail = M.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key, out]) # doctest: +SKIP
    +...
    +
    +sage: from claasp.cipher_modules.models.utils import integer_to_bit_list, set_fixed_variables
    +sage: from claasp.ciphers.block_ciphers.present_block_cipher import PresentBlockCipher
    +sage: present = PresentBlockCipher(number_of_rounds=1)
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(present)
    +sage: key = set_fixed_variables(component_id='key', constraint_type='equal', bit_positions=range(80), bit_values=[0]*80)
    +sage: plaintext = set_fixed_variables(component_id='plaintext', constraint_type='equal', bit_positions=range(64), bit_values=[2,0,0,0] + [1,0,0,1] + [0,0,0,1] + [1,0,0,0] + [0] * 48)
    +sage: trail = sat.find_one_bitwise_deterministic_truncated_xor_differential_trail(fixed_values=[plaintext, key]) # doctest: +SKIP
    +...
    +
    +
    +
    + +
    +
    +fix_variables_value_constraints(fixed_variables=[])
    +

    Return constraints for fixed variables

    +

    Return lists of variables and clauses for fixing variables in bitwise +deterministic truncated XOR differential model.

    +
    +

    See also

    +

    set_fixed_variables()

    +
    +

    INPUT:

    +
      +
    • fixed_variableslist (default: []); variables in default format

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: fixed_variables = [{
    +....:    'component_id': 'plaintext',
    +....:    'constraint_type': 'equal',
    +....:    'bit_positions': [0, 1, 2, 3],
    +....:    'bit_values': [1, 0, 1, 1]
    +....: }, {
    +....:    'component_id': 'ciphertext',
    +....:    'constraint_type': 'not_equal',
    +....:    'bit_positions': [0, 1, 2, 3],
    +....:    'bit_values': [2, 1, 1, 0]
    +....: }]
    +sage: sat.fix_variables_value_constraints(fixed_variables)
    +['-plaintext_0_0',
    + 'plaintext_0_1',
    + '-plaintext_1_0',
    + '-plaintext_1_1',
    + '-plaintext_2_0',
    + 'plaintext_2_1',
    + '-plaintext_3_0',
    + 'plaintext_3_1',
    + '-ciphertext_0_0 ciphertext_1_0 -ciphertext_1_1 ciphertext_2_0 -ciphertext_2_1 ciphertext_3_0 ciphertext_3_1']
    +
    +
    +
    + +
    +
    +property model_constraints
    +

    Return the model specified by model_type.

    +

If the key refers to one of the available models, the corresponding constraints are returned; otherwise, an exception is raised.

    +

    INPUT:

    +
      +
    • model_typestring; the model to retrieve

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.sat.sat_model import SatModel
    +sage: speck = SpeckBlockCipher(number_of_rounds=4)
    +sage: sat = SatModel(speck)
    +sage: sat.model_constraints('xor_differential')
    +Traceback (most recent call last):
    +...
    +ValueError: No model generated
    +
    +
    +
    + +
    +
    +property sboxes_ddt_templates
    +
    + +
    +
    +property sboxes_lat_templates
    +
    + +
    +
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)
    +

    Return the solution of the model using the solver_name SAT solver.

    +
    +

    Note

    +

    Two types of solvers can be chosen: external or internal. In the following list of inputs, allowed SAT +solvers are listed. Those ending with _sage will not create a subprocess nor additional files and will +work completely embedded in Sage. Remaining solvers are allowed, but they need to be installed in the +system.

    +
    +

    INPUT:

    +
      +
    • model_typestring; the model for which we want a solution. Available values are:

      +
        +
      • 'cipher'

      • +
      • 'xor_differential'

      • +
      • 'xor_linear'

      • +
      +
    • +
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    +
    +

    See also

    +

    SAT Solvers

    +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_cipher_model import SatCipherModel
    +sage: from claasp.ciphers.block_ciphers.tea_block_cipher import TeaBlockCipher
    +sage: tea = TeaBlockCipher(number_of_rounds=32)
    +sage: sat = SatCipherModel(tea)
    +sage: sat.build_cipher_model()
    +sage: sat.solve('cipher') # random
    +{'cipher_id': 'tea_p64_k128_o64_r32',
    + 'model_type': 'tea_p64_k128_o64_r32',
    + 'solver_name': 'CRYPTOMINISAT_EXT',
    + ...
    +  'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
    +  'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},
    + 'total_weight': 0,
    + 'status': 'SATISFIABLE'}}
    +
    +
    +
    + +
    +
    +weight_constraints(number_of_unknown_variables)
    +

    Return lists of variables and constraints that fix the number of unknown +variables of the input and the output of the trail to a specific value.

    +

    INPUT:

    +
      +
    • number_of_unknown_variablesint; the number of the unknown variables

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_bitwise_deterministic_truncated_xor_differential_model import SatBitwiseDeterministicTruncatedXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatBitwiseDeterministicTruncatedXorDifferentialModel(speck)
    +sage: sat.build_bitwise_deterministic_truncated_xor_differential_trail_model()
    +sage: sat.weight_constraints(4)
    +(['dummy_hw_0_0_0',
    +  'dummy_hw_0_0_1',
    +  'dummy_hw_0_0_2',
    +  ...
    +  '-dummy_hw_0_61_3 dummy_hw_0_62_3',
    +  '-cipher_output_2_12_30_0 -dummy_hw_0_61_3',
    +  '-cipher_output_2_12_31_0 -dummy_hw_0_62_3'])
    +
    +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file

diff --git a/docs/build/html/cipher_modules/models/sat/sat_models/sat_cipher_model.html b/docs/build/html/cipher_modules/models/sat/sat_models/sat_cipher_model.html
index 99d6f2ff..65775448 100644
--- a/docs/build/html/cipher_modules/models/sat/sat_models/sat_cipher_model.html
+++ b/docs/build/html/cipher_modules/models/sat/sat_models/sat_cipher_model.html
@@ -1,23 +1,24 @@
-    Sat cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sat cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Sat cipher model

    +

    Sat cipher model

    class SatCipherModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    -

    Bases: claasp.cipher_modules.models.sat.sat_model.SatModel

    +

    Bases: SatModel

    build_cipher_model(fixed_variables=[])
    @@ -95,7 +96,7 @@

    Navigation

    -find_missing_bits(fixed_values=[], solver_name='cryptominisat')
    +find_missing_bits(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a generic flow of the cipher from plaintext and key to ciphertext.

    INPUT:

      @@ -120,7 +121,7 @@

      Navigation

sage: sat.find_missing_bits(fixed_values=[ciphertext]) # random
{'cipher_id': 'speck_p32_k64_o32_r22',
 'model_type': 'cipher',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_21_11': {'value': '1411'},
 'cipher_output_21_12': {'value': 'affec7ed'}},

@@ -203,7 +204,7 @@

      Navigation

      -solve(model_type, solver_name='cryptominisat', options=None)
      +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

      Return the solution of the model using the solver_name SAT solver.

      Note

      @@ -236,7 +237,7 @@

      Navigation

sage: sat.solve('cipher') # random
{'cipher_id': 'tea_p64_k128_o64_r32',
 'model_type': 'tea_p64_k128_o64_r32',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},

@@ -285,13 +286,13 @@

      Navigation

      - +
    @@ -324,10 +325,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -335,7 +336,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_differential_model.html b/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_differential_model.html
index 4256dde9..94c925fc 100644
--- a/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_differential_model.html
+++ b/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_differential_model.html
@@ -1,22 +1,23 @@
-    Sat xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sat xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,7 +34,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Sat xor differential model

    +

    Sat xor differential model

    -class SatXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False, window_size_by_round=None)
    -

    Bases: claasp.cipher_modules.models.sat.sat_model.SatModel

    +class SatXorDifferentialModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False) +

    Bases: SatModel

    build_xor_differential_trail_and_checker_model_at_intermediate_output_level(weight=- 1, fixed_variables=[])
    @@ -122,35 +123,41 @@

    Navigation

    -find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight.

    +find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(9)
    +sage: len(trails) == 2
    +True
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(9, fixed_values=[plaintext, key])
    +....:     bit_values=[0]*64)
    +sage: trails = sat.find_all_xor_differential_trails_with_fixed_weight(2, fixed_values=[key])
     sage: len(trails) == 2
     True
     
    @@ -159,8 +166,9 @@

    Navigation

    -find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions.

    +find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions. +By default, the search is set in the single-key setting.

The list contains all the XOR differential trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -168,30 +176,35 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); they can be created using set_fixed_variables method

  • -
  • solver_namestring (default: cryptominisat); the name of the solver

  • +
  • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

  • See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(9, 10)
    +sage: len(trails) == 28
    +True
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(9, 10, fixed_values=[plaintext, key])
    -sage: len(trails) == 28
    +....:     bit_values=[0]*64)
    +sage: trails = sat.find_all_xor_differential_trails_with_weight_at_most(2, 3, fixed_values=[key])
    +sage: len(trails) == 9
     True
     
    @@ -199,8 +212,9 @@

    Navigation

    -find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a trail with the lowest weight.

    +find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a trail with the lowest weight. +By default, the search is set in the single-key setting.

    Note

    There could be more than one trail with the lowest weight. In order to find all the lowest weight trail, @@ -209,105 +223,121 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: trail = sat.find_lowest_weight_xor_differential_trail()
    +sage: trail['total_weight']
    +9.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=(0,)*32)
     sage: key = set_fixed_variables(
     ....:         component_id='key',
    -....:         constraint_type='equal',
    +....:         constraint_type='not_equal',
     ....:         bit_positions=range(64),
     ....:         bit_values=(0,)*64)
    -sage: trail = sat.find_lowest_weight_xor_differential_trail(fixed_values=[plaintext, key])
    +sage: trail = sat.find_lowest_weight_xor_differential_trail(fixed_values=[key])
     sage: trail['total_weight']
    -9.0
    +1.0
     
    -find_one_xor_differential_trail(fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a XOR differential trail.

    -

    The solution probability is almost always lower than the one of a random guess of the longest input.

    +find_one_xor_differential_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a XOR differential trail. +By default, the search is set in the single-key setting. +The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: sat = SatXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    +sage: sat.find_one_xor_differential_trail() # random
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: key = set_fixed_variables(
    +....:     component_id='key',
     ....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: sat.find_one_xor_differential_trail(fixed_values=[plaintext]) # random
    -{'cipher_id': 'speck_p32_k64_o32_r5',
    - 'model_type': 'xor_differential',
    - 'solver_name': 'cryptominisat',
    - 'solving_time_seconds': 0.0,
    - 'memory_megabytes': 7.09,
    - ...
    - 'status': 'SATISFIABLE',
    - 'building_time_seconds': 0.004874706268310547}
    +....:     bit_positions=range(64),
    +....:     bit_values=[0]*64)
    +sage: result = sat.find_one_xor_differential_trail(fixed_values=[key])
    +sage: result['total_weight'] == 9.0
    +True
     
    -find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight.

    -

    INPUT:

    +find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight. +By default, the search is set in the single-key setting. +INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    SAT Solvers

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
    -sage: sat = SatXorDifferentialModel(speck, window_size_by_round=[0, 0, 0])
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=(0,)*32)
    +sage: sat = SatXorDifferentialModel(speck)
    +sage: sat.set_window_size_heuristic_by_round([0, 0, 0])
    +sage: trail = sat.find_one_xor_differential_trail_with_fixed_weight(3)
    +sage: trail['total_weight']
    +3.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: sat = SatXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=(0,)*64)
    -sage: result = sat.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[plaintext, key])
    -sage: result['total_weight']
    +....:     bit_values=[0]*64)
    +sage: trail = sat.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
     3.0
     
    @@ -383,9 +413,19 @@

    Navigation

    property sboxes_lat_templates
    +
    +
    +set_window_size_heuristic_by_component_id(window_size_by_component_id_values, number_of_full_windows=None, full_window_operator='at_least')
    +
    + +
    +
    +set_window_size_heuristic_by_round(window_size_by_round_values, number_of_full_windows=None, full_window_operator='at_least')
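These two setters are listed without a description here; as a hedged illustration (mirroring the fixed-weight example earlier on this page), the per-round variant is called on the model before the search, with one window-size value per round:

sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=3)
sage: sat = SatXorDifferentialModel(speck)
sage: sat.set_window_size_heuristic_by_round([0, 0, 0])  # one value per round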
    +
    +
    -solve(model_type, solver_name='cryptominisat', options=None)
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

    Return the solution of the model using the solver_name SAT solver.

    Note

    @@ -418,7 +458,7 @@

    Navigation

sage: sat.solve('cipher') # random
{'cipher_id': 'tea_p64_k128_o64_r32',
 'model_type': 'tea_p64_k128_o64_r32',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},

@@ -455,8 +495,18 @@

    Navigation

    -
    -property window_size_by_round
    +
    +property window_size_by_component_id_values
    +
    + +
    +
    +property window_size_by_round_values
    +
    + +
    +
    +property window_size_number_of_full_window
    @@ -477,8 +527,8 @@

    Previous topic

    This Page

    @@ -496,7 +546,7 @@

    Quick search

    @@ -511,7 +561,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
diff --git a/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_linear_model.html b/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_linear_model.html
index bdbf2a31..6497da26 100644
--- a/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_linear_model.html
+++ b/docs/build/html/cipher_modules/models/sat/sat_models/sat_xor_linear_model.html
@@ -1,23 +1,24 @@
-    Sat xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sat xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Sat xor linear model

    +

    Sat xor linear model

    class SatXorLinearModel(cipher, window_size_weight_pr_vars=- 1, counter='sequential', compact=False)
    -

    Bases: claasp.cipher_modules.models.sat.sat_model.SatModel

    +

    Bases: SatModel

    branch_xor_linear_constraints()
    @@ -121,14 +122,15 @@

    Navigation

    -find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    +find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return a list of solutions containing all the XOR linear trails having weight equal to fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -137,16 +139,21 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[plaintext])
    -sage: len(trails) == 2
    +sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(1)
    +sage: len(trails) == 4
    +True
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = sat.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[key]) # long
    +sage: len(trails) == 8
     True
     
    @@ -154,8 +161,9 @@

    Navigation

    -find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='cryptominisat')
    -

    Return a list of solutions.

    +find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT') +

    Return a list of solutions. +By default, the search removes the key schedule, if any.

    The list contains all the XOR linear trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -163,7 +171,7 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); can be created using set_fixed_variables method

  • -
  • solver_namestring (default: cryptominisat); the name of the solver

  • +
  • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

  • See also

    @@ -172,16 +180,21 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(2, 3, fixed_values=[plaintext])
    -sage: len(trails) == 11
    +sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(0, 2) # long
    +sage: len(trails) == 187
    +True
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = sat.find_all_xor_linear_trails_with_weight_at_most(0, 3, fixed_values=[key]) # long
    +sage: len(trails) == 73
     True
     
    @@ -189,8 +202,9 @@

    Navigation

    -find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='cryptominisat')
    +find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR LINEAR trail with the lowest possible weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    Note

    @@ -200,7 +214,7 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -209,31 +223,37 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trail = sat.find_lowest_weight_xor_linear_trail(fixed_values=[plaintext])
    +sage: trail = sat.find_lowest_weight_xor_linear_trail()
     sage: trail['total_weight']
    -2.0
    +1.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = sat.find_lowest_weight_xor_linear_trail(fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    -find_one_xor_linear_trail(fixed_values=[], solver_name='cryptominisat')
    +find_one_xor_linear_trail(fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR linear trail. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -242,15 +262,9 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=4)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: sat.find_one_xor_linear_trail(fixed_values=[plaintext]) # random
    +sage: sat.find_one_xor_linear_trail() # random
     {'cipher_id': 'speck_p32_k64_o32_r4',
      'model_type': 'xor_linear',
      'solver_name': 'cryptominisat',
    @@ -259,20 +273,30 @@ 

    Navigation

... 'status': 'SATISFIABLE', 'building_time_seconds': 0.010079622268676758}
+
+# including the key schedule in the model
+sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: from claasp.cipher_modules.models.utils import set_fixed_variables
+sage: speck = SpeckBlockCipher(number_of_rounds=4)
+sage: sat = SatXorLinearModel(speck)
+sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
+sage: sat.find_one_xor_linear_trail(fixed_values=[key]) # random
    -find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='cryptominisat')
    +find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='CRYPTOMINISAT_EXT')

    Return the solution representing a XOR linear trail whose weight is fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: cryptominisat); the name of the solver

    • +
    • solver_namestring (default: CRYPTOMINISAT_EXT); the name of the solver

    See also

    @@ -280,18 +304,23 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: sat = SatXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=(0,)*32)
    -sage: result = sat.find_one_xor_linear_trail_with_fixed_weight(7, fixed_values=[plaintext])
    -sage: result['total_weight']
    +sage: trail = sat.find_one_xor_linear_trail_with_fixed_weight(7)
    +sage: trail['total_weight']
     7.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_linear_model import SatXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: sat = SatXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trail = sat.find_one_xor_linear_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    @@ -406,7 +435,7 @@

    Navigation

    -solve(model_type, solver_name='cryptominisat', options=None)
    +solve(model_type, solver_name='CRYPTOMINISAT_EXT', options=None)

    Return the solution of the model using the solver_name SAT solver.

    Note

    @@ -439,7 +468,7 @@

    Navigation

sage: sat.solve('cipher') # random
{'cipher_id': 'tea_p64_k128_o64_r32',
 'model_type': 'tea_p64_k128_o64_r32',
- 'solver_name': 'cryptominisat',
+ 'solver_name': 'CRYPTOMINISAT_EXT',
 ...
 'intermediate_output_31_15': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1},
 'cipher_output_31_16': {'value': '8ca8d5de0906f08e', 'weight': 0, 'sign': 1}},

@@ -493,8 +522,8 @@

    Navigation

    Next topic

    @@ -517,7 +546,7 @@

    Quick search

    @@ -535,7 +564,7 @@

    Navigation

    next |
  • - previous |
  • @@ -543,7 +572,7 @@

    Navigation

diff --git a/docs/build/html/cipher_modules/models/sat/solvers.html b/docs/build/html/cipher_modules/models/sat/solvers.html
new file mode 100644
index 00000000..2ce01850
--- /dev/null
+++ b/docs/build/html/cipher_modules/models/sat/solvers.html
@@ -0,0 +1,243 @@
+    Solvers — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Solvers

    +
    +

    Available SAT solvers

    +

    In this file, all the available SAT solvers are listed. They can be divided in +two categories: internal and external.

    +

    Internal SAT solvers should be installed by default and no further action is +needed. For any other information on internal SAT solvers, visit Abstract SAT +solver.

    +

External SAT solvers are called through a subprocess, so they need to be installed on the system only for a bare-metal installation. If you use a Docker container running the default image of the library, no further action is needed.
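As an illustrative sketch (not part of the generated page), a solver from this list is selected by passing its name through the solver_name argument of any search method:

sage: from claasp.cipher_modules.models.sat.sat_models.sat_xor_differential_model import SatXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: sat = SatXorDifferentialModel(speck)
sage: # 'CRYPTOMINISAT_EXT' is the external CryptoMiniSat solver used as default throughout these pages
sage: trail = sat.find_lowest_weight_xor_differential_trail(solver_name='CRYPTOMINISAT_EXT')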

    +
    +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file

diff --git a/docs/build/html/cipher_modules/models/sat/utils/mzn_predicates.html b/docs/build/html/cipher_modules/models/sat/utils/mzn_predicates.html
index a93f2fa1..22bd1c36 100644
--- a/docs/build/html/cipher_modules/models/sat/utils/mzn_predicates.html
+++ b/docs/build/html/cipher_modules/models/sat/utils/mzn_predicates.html
@@ -1,23 +1,24 @@
-    Mzn predicates — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Mzn predicates — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Mzn predicates

    +

    Mzn predicates

    get_word_operations()
    @@ -73,8 +74,8 @@

    Navigation

    Next topic

    @@ -97,7 +98,7 @@

    Quick search

    - +
    @@ -115,7 +116,7 @@

    Navigation

    next |
  • - previous |
  • @@ -123,7 +124,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/sat/utils/n_window_heuristic_helper.html b/docs/build/html/cipher_modules/models/sat/utils/n_window_heuristic_helper.html index 184c87dd..3077ab68 100644 --- a/docs/build/html/cipher_modules/models/sat/utils/n_window_heuristic_helper.html +++ b/docs/build/html/cipher_modules/models/sat/utils/n_window_heuristic_helper.html @@ -1,23 +1,24 @@ - + - N window heuristic helper — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + N window heuristic helper — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    N window heuristic helper

    +

    N window heuristic helper

    window_size_0_cnf(x)
    @@ -87,6 +88,26 @@

    Navigation

    window_size_5_cnf(x)
    +
    +
    +window_size_with_full_1_window_cnf(a, b, c, aux)
    +
    + +
    +
    +window_size_with_full_2_window_cnf(a, b, c, aux)
    +
    + +
    +
    +window_size_with_full_3_window_cnf(a, b, c, aux)
    +
    + +
    +
    +window_size_with_full_4_window_cnf(a, b, c, aux)
    +
    +
    @@ -98,13 +119,13 @@

    Navigation

    Previous topic

    -

    Cms cipher model

    +

    Utils

    This Page

    @@ -122,7 +143,7 @@

    Quick search

    - +
    @@ -137,10 +158,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -148,7 +169,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/sat/utils/utils.html b/docs/build/html/cipher_modules/models/sat/utils/utils.html index c5e960a3..c52c0389 100644 --- a/docs/build/html/cipher_modules/models/sat/utils/utils.html +++ b/docs/build/html/cipher_modules/models/sat/utils/utils.html @@ -1,22 +1,23 @@ - + - Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -33,7 +34,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,12 +57,12 @@

    Navigation

    -

    Utils

    +

    Utils

    -

    General

    +

    General

    -

    Direct building of CNFs representing boolean equalities

    +

    Direct building of CNFs representing boolean equalities

Building a CNF representing a generic boolean equality can be time-consuming. This module offers functions to directly build the CNF of basic boolean equalities. It also offers functions to directly build CNFs for the @@ -71,7 +72,7 @@

    Direct building of CNFs representing boolean equalities

    -

    Running SAT solver

    +

    Running SAT solver

Sat Model allows the use of many SAT solvers like CryptoMiniSat, Glucose, Minisat and others. Unfortunately, some of them do not take input from stdin and need an input file. Functions of this section supply the best running @@ -441,11 +442,132 @@

    Running SAT solver +
    +cnf_xor_truncated(result, variable_0, variable_1)
    +

    Return a list of strings representing the CNF of the Boolean XOR when +searching for DETERMINISTIC TRUNCATED XOR DIFFERENTIAL. I.e., an XOR +behaving as in the following table:

variable_0    variable_1    result
0             0             0
0             1             1
0             2             2
1             0             1
1             1             0
1             2             2
2             0             2
2             1             2
2             2             2

INPUT:

• result – tuple of two strings; the result variable
• variable_0 – tuple of two strings; the first variable
• variable_1 – tuple of two strings; the second variable

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.utils.utils import cnf_xor_truncated
    +sage: cnf_xor_truncated(('r0', 'r1'), ('a0', 'a1'), ('b0', 'b1'))
    +['r0 -a0',
    + 'r0 -b0',
    + 'a0 b0 -r0',
    + 'a1 b1 r0 -r1',
    + 'a1 r0 r1 -b1',
    + 'b1 r0 r1 -a1',
    + 'r0 -a1 -b1 -r1']
    +
    +
    +

    + +
    +
    +cnf_xor_truncated_seq(results, variables)
    +

    Return a list of strings representing the CNF of the Boolean XOR performed +between more than 2 inputs when searching for DETERMINISTIC TRUNCATED XOR +DIFFERENTIAL.

    +
    +

    See also

    +

    cnf_xor_truncated()

    +
    +

    INPUT:

    +
      +
    • resultslist; intermediate results + final result

    • +
    • variableslist; the variables

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.sat.utils.utils import cnf_xor_truncated_seq
    +sage: cnf_xor_truncated_seq([('i0', 'i1'), ('r0', 'r1')], [('a0', 'a1'), ('b0', 'b1'), ('c0', 'c1')])
    +['i0 -a0',
    + 'i0 -b0',
    + 'a0 b0 -i0',
    + ...
    + 'i1 r0 r1 -c1',
    + 'c1 r0 r1 -i1',
    + 'r0 -i1 -c1 -r1']
    +
    +
    +
    +
    create_numerical_cnf(cnf)
    +
    +
    +modadd_truncated(result, variable_0, variable_1, carry, next_carry)
    +
    + +
    +
    +modadd_truncated_lsb(result, variable_0, variable_1, next_carry)
    +
    + +
    +
    +modadd_truncated_msb(result, variable_0, variable_1, carry)
    +
    +
    numerical_cnf_to_dimacs(number_of_variables, numerical_cnf)
    @@ -453,25 +575,25 @@

    Running SAT solver
    -run_minisat(options, dimacs_input, input_file_name, output_file_name)
    +run_minisat(solver_specs, options, dimacs_input, input_file_name, output_file_name)

    Call the MiniSat solver specified in solver_specs, using input and output files.

    -run_parkissat(options, dimacs_input, input_file_name)
    +run_parkissat(solver_specs, options, dimacs_input, input_file_name)

    Call the Parkissat solver specified in solver_specs, using input and output files.

    -run_sat_solver(solver_name, options, dimacs_input, host=None, env_vars_string='')
    +run_sat_solver(solver_specs, options, dimacs_input, host=None, env_vars_string='')

    Call the SAT solver specified in solver_specs, using input and output pipes.

    -run_yices(options, dimacs_input, input_file_name)
    +run_yices(solver_specs, options, dimacs_input, input_file_name)

    Call the Yices SAT solver specified in solver_specs, using input file.

    @@ -504,8 +626,8 @@

    Previous topic

    This Page

    @@ -523,7 +645,7 @@

    Quick search

    - +
    @@ -538,7 +660,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + diff --git a/docs/build/html/cipher_modules/models/smt/smt_model.html b/docs/build/html/cipher_modules/models/smt/smt_model.html index 265f03a7..3cc56fc3 100644 --- a/docs/build/html/cipher_modules/models/smt/smt_model.html +++ b/docs/build/html/cipher_modules/models/smt/smt_model.html @@ -1,23 +1,24 @@ - + - Smt model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Smt model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,44 +57,18 @@

    Navigation

    -

    Smt model

    +

    Smt model

    -

    SMT standard of Cipher

    +

    SMT standard of Cipher

The target of this class is to build, solve and retrieve the solution of an SMT CNF representing some attacks on ciphers, e.g. the generic cipher inversion or the search for XOR differential trails (for SAT CNFs see the corresponding class Sat Model). SMT-LIB is the chosen standard.

    -

    An SMT solver is called by a subprocess, therefore note also that you will not -be able to solve the models in the SMT-LIB files until you have installed one -SMT solver at least. In methods, solvers are chosen by solver_name -variable. Solvers and their corresponding values for solver_name variable -are:

    -
    -
    ---- - - - - - - - - - - - - - - - - -

    SMT solver

    value

    Z3

    'z3'

    Yices-smt2

    'yices-smt2'

    MathSAT

    'mathsat'

    -
    -

    The default choice is z3.

    +

    This module is able to use many different SMT solvers.

    +

    For any further information, refer to the file +claasp.cipher_modules.models.smt.solvers.py and to the section +Available SMT solvers.
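For orientation, here is a minimal sketch (mirroring the examples on the SmtXorDifferentialModel page) of how a concrete SMT model is built on a cipher and how the solver is selected through the solver_name argument:

sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: smt = SmtXorDifferentialModel(speck)
sage: # 'Z3_EXT' is the documented default; any name from the Available SMT solvers section can be passed
sage: trail = smt.find_lowest_weight_xor_differential_trail(solver_name='Z3_EXT')  # long
sage: trail['total_weight']
9.0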

    class SmtModel(cipher, counter='sequential')
    @@ -195,7 +170,7 @@

    Navigation

    -solve(model_type, solver_name='z3')
    +solve(model_type, solver_name='Z3_EXT')

    Return the solution of the model using the solver_name SMT solver.

    INPUT:

      @@ -206,7 +181,7 @@

      Navigation

    • 'xor_linear'

    -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • See also

    @@ -220,7 +195,7 @@

    Navigation

    sage: smt.solve('xor_differential') # random {'cipher_id': 'speck_p32_k64_o32_r4', 'model_type': 'xor_differential', - 'solver_name': 'z3', + 'solver_name': 'Z3_EXT', 'solving_time_seconds': 0.0, 'memory_megabytes': 0.09, 'components_values': {}, @@ -288,13 +263,13 @@

    Table of Contents

    This Page

    @@ -312,7 +287,7 @@

    Quick search

    - +
    @@ -327,10 +302,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -338,7 +313,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/smt/smt_models/smt_cipher_model.html b/docs/build/html/cipher_modules/models/smt/smt_models/smt_cipher_model.html index 47af7338..562e04c4 100644 --- a/docs/build/html/cipher_modules/models/smt/smt_models/smt_cipher_model.html +++ b/docs/build/html/cipher_modules/models/smt/smt_models/smt_cipher_model.html @@ -1,23 +1,24 @@ - + - Smt cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Smt cipher model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Smt cipher model

    +

    Smt cipher model

    class SmtCipherModel(cipher, counter='sequential')
    -

    Bases: claasp.cipher_modules.models.smt.smt_model.SmtModel

    +

    Bases: SmtModel

    build_cipher_model(fixed_variables=[])
    @@ -121,7 +122,7 @@

    Navigation

    -find_missing_bits(fixed_values=[], solver_name='z3')
    +find_missing_bits(fixed_values=[], solver_name='Z3_EXT')

    Return the solution representing a generic flow of the cipher from plaintext and key to ciphertext.

    INPUT:

      @@ -146,7 +147,7 @@

      Navigation

      sage: smt.find_missing_bits(fixed_values=[ciphertext]) # random {'cipher_id': 'speck_k64_p32_o32_r22', 'model_type': 'speck_k64_p32_o32_r22', - 'solver_name': 'cryptominisat', + 'solver_name': 'Z3_EXT', ... 'intermediate_output_21_11': {'value': '90fe', 'weight': 0}, 'cipher_output_21_12': {'value': 'affec7ed', 'weight': 0}}, @@ -219,7 +220,7 @@

      Navigation

      -solve(model_type, solver_name='z3')
      +solve(model_type, solver_name='Z3_EXT')

      Return the solution of the model using the solver_name SMT solver.

      INPUT:

        @@ -230,7 +231,7 @@

        Navigation

      • 'xor_linear'

      -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    See also

    @@ -244,7 +245,7 @@

    Navigation

    sage: smt.solve('xor_differential') # random {'cipher_id': 'speck_p32_k64_o32_r4', 'model_type': 'xor_differential', - 'solver_name': 'z3', + 'solver_name': 'Z3_EXT', 'solving_time_seconds': 0.0, 'memory_megabytes': 0.09, 'components_values': {}, @@ -286,13 +287,13 @@

    Navigation

    - +
    @@ -325,10 +326,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -336,7 +337,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model.html b/docs/build/html/cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model.html index 3086a2e0..e9055b45 100644 --- a/docs/build/html/cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/smt/smt_models/smt_deterministic_truncated_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Smt deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Smt deterministic truncated xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Smt deterministic truncated xor differential model

    +

    Smt deterministic truncated xor differential model

    class SmtDeterministicTruncatedXorDifferentialModel(cipher, counter='sequential')
    -

    Bases: claasp.cipher_modules.models.smt.smt_model.SmtModel

    +

    Bases: SmtModel

    calculate_component_weight(component, out_suffix, output_values_dict)
    @@ -158,7 +159,7 @@

    Navigation

    -solve(model_type, solver_name='z3')
    +solve(model_type, solver_name='Z3_EXT')

    Return the solution of the model using the solver_name SMT solver.

    INPUT:

      @@ -169,7 +170,7 @@

      Navigation

    • 'xor_linear'

    -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • See also

    @@ -183,7 +184,7 @@

    Navigation

    sage: smt.solve('xor_differential') # random {'cipher_id': 'speck_p32_k64_o32_r4', 'model_type': 'xor_differential', - 'solver_name': 'z3', + 'solver_name': 'Z3_EXT', 'solving_time_seconds': 0.0, 'memory_megabytes': 0.09, 'components_values': {}, @@ -225,13 +226,13 @@

    Navigation

    This Page

    @@ -249,7 +250,7 @@

    Quick search

    - +
    @@ -264,10 +265,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -275,7 +276,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_differential_model.html b/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_differential_model.html index 79599b42..dcc97159 100644 --- a/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_differential_model.html +++ b/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_differential_model.html @@ -1,23 +1,24 @@ - + - Smt xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Smt xor differential model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Smt xor differential model

    +

    Smt xor differential model

    class SmtXorDifferentialModel(cipher, counter='sequential')
    -

    Bases: claasp.cipher_modules.models.smt.smt_model.SmtModel

    +

    Bases: SmtModel

    build_xor_differential_trail_model(weight=- 1, fixed_variables=[])
    @@ -125,35 +126,41 @@

    Navigation

    -find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='z3')
    -

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight.

    +find_all_xor_differential_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Z3_EXT') +

    Return a list of solutions containing all the XOR differential trails having the fixed_weight weight. +By default, the search is set in the single-key setting.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables in method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: smt = SmtXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = smt.find_all_xor_differential_trails_with_fixed_weight(9)
    +sage: len(trails)
    +2
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: smt = SmtXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = smt.find_all_xor_differential_trails_with_fixed_weight(9, fixed_values=[plaintext, key])
    +....:     bit_values=[0]*64)
    +sage: trails = smt.find_all_xor_differential_trails_with_fixed_weight(2, fixed_values=[key])
     sage: len(trails)
     2
     
    @@ -162,8 +169,9 @@

    Navigation

    -find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='z3')
    -

    Return a list of solutions.

    +find_all_xor_differential_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='Z3_EXT') +

    Return a list of solutions. +By default, the search is set in the single-key setting.

    The list contains all the XOR differential trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -171,39 +179,45 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); they can be created using set_fixed_variables method

  • -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: smt = SmtXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trails = smt.find_all_xor_differential_trails_with_weight_at_most(9, 10)
    +sage: len(trails)
    +28
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: smt = SmtXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trails = smt.find_all_xor_differential_trails_with_weight_at_most(9, 10, fixed_values=[plaintext, key])
    +....:     bit_values=[0]*64)
    +sage: trails = smt.find_all_xor_differential_trails_with_weight_at_most(2, 3, fixed_values=[key])
     sage: len(trails)
    -28
    +9
     
    -find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='z3')
    -

    Return the solution representing a trail with the lowest weight.

    +find_lowest_weight_xor_differential_trail(fixed_values=[], solver_name='Z3_EXT') +

    Return the solution representing a trail with the lowest weight. +By default, the search is set in the single-key setting.

    Note

    There could be more than one trail with the lowest weight. In order to find all the lowest weight trail, @@ -212,77 +226,92 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: smt = SmtXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    +sage: trail = smt.find_lowest_weight_xor_differential_trail()
    +sage: trail['total_weight']
    +9.0
    +
    +# related-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: smt = SmtXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=integer_to_bit_list(0, 64, 'big'))
    -sage: trail = smt.find_lowest_weight_xor_differential_trail(fixed_values=[plaintext, key])
    +....:     bit_values=[0]*64)
    +sage: trail = smt.find_lowest_weight_xor_differential_trail(fixed_values=[key])
     sage: trail['total_weight']
    -9.0
    +1.0
     
    -find_one_xor_differential_trail(fixed_values=[], solver_name='z3')
    -

    Return the solution representing a XOR differential trail.

    -

    The solution probability is almost always lower than the one of a random guess of the longest input.

    +find_one_xor_differential_trail(fixed_values=[], solver_name='Z3_EXT') +

    Return the solution representing a XOR differential trail. +By default, the search is set in the single-key setting. +The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); can be created using set_fixed_variables method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=5)
     sage: smt = SmtXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: smt.find_one_xor_differential_trail(fixed_values=[plaintext]) # random
    +sage: smt.find_one_xor_differential_trail() # random
     {'cipher_id': 'speck_p32_k64_o32_r5',
      'model_type': 'xor_differential',
    - 'solver_name': 'z3',
    + 'solver_name': 'Z3_EXT',
      'solving_time_seconds': 0.05,
      'memory_megabytes': 19.28,
      ...
      'total_weight': 93,
      'building_time_seconds': 0.002946615219116211}
    +
    + # related-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: smt = SmtXorDifferentialModel(speck)
    +sage: key = set_fixed_variables(
    +....:     component_id='key',
    +....:     constraint_type='not_equal',
    +....:     bit_positions=range(64),
    +....:     bit_values=[0]*64)
    +sage: smt.find_one_xor_differential_trail(fixed_values=[key]) # random
     
    -find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='z3')
    -

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight.

    +find_one_xor_differential_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Z3_EXT') +

    Return the solution representing a XOR differential trail whose probability is 2 ** fixed_weight. +By default, the search is set in the single-key setting.

    INPUT:

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +
    # single-key setting
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: smt = SmtXorDifferentialModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:     component_id='plaintext',
    -....:     constraint_type='not_equal',
    -....:     bit_positions=range(32),
    -....:     bit_values=(0,)*32)
    +sage: trail = smt.find_one_xor_differential_trail_with_fixed_weight(3)
    +sage: trail['total_weight']
    +3.0
    +
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(number_of_rounds=5)
    +sage: smt = SmtXorDifferentialModel(speck)
     sage: key = set_fixed_variables(
     ....:     component_id='key',
    -....:     constraint_type='equal',
    +....:     constraint_type='not_equal',
     ....:     bit_positions=range(64),
    -....:     bit_values=(0,)*64)
    -sage: result = smt.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[plaintext, key])
    -sage: result['total_weight']
    +....:     bit_values=[0]*64)
    +sage: trail = smt.find_one_xor_differential_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
     3.0
     
    @@ -385,7 +418,7 @@

    Navigation

    -solve(model_type, solver_name='z3')
    +solve(model_type, solver_name='Z3_EXT')

    Return the solution of the model using the solver_name SMT solver.

    INPUT:

      @@ -396,7 +429,7 @@

      Navigation

    • 'xor_linear'

    -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • See also

    @@ -410,7 +443,7 @@

    Navigation

    sage: smt.solve('xor_differential') # random {'cipher_id': 'speck_p32_k64_o32_r4', 'model_type': 'xor_differential', - 'solver_name': 'z3', + 'solver_name': 'Z3_EXT', 'solving_time_seconds': 0.0, 'memory_megabytes': 0.09, 'components_values': {}, @@ -452,13 +485,13 @@

    Navigation

    This Page

    @@ -476,7 +509,7 @@

    Quick search

    - +
    @@ -491,10 +524,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -502,7 +535,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_linear_model.html b/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_linear_model.html index a78e7465..334eceb4 100644 --- a/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_linear_model.html +++ b/docs/build/html/cipher_modules/models/smt/smt_models/smt_xor_linear_model.html @@ -1,23 +1,24 @@ - + - Smt xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Smt xor linear model — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Smt xor linear model

    +

    Smt xor linear model

    class SmtXorLinearModel(cipher, counter='sequential')
    -

    Bases: claasp.cipher_modules.models.smt.smt_model.SmtModel

    +

    Bases: SmtModel

    branch_xor_linear_constraints()
    @@ -171,14 +172,15 @@

    Navigation

    -find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='z3')
    +find_all_xor_linear_trails_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Z3_EXT')

    Return a list of solutions containing all the XOR linear trails having weight equal to fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

    • fixed_weightinteger; the weight to be fixed

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    See also

    @@ -187,25 +189,31 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: smt = SmtXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = smt.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[plaintext])
    +sage: trails = smt.find_all_xor_linear_trails_with_fixed_weight(1)
     sage: len(trails)
    -2
    +4
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: smt = SmtXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = smt.find_all_xor_linear_trails_with_fixed_weight(2, fixed_values=[key]) # long
    +sage: len(trails)
    +8
     
    -find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='z3')
    -

    Return a list of solutions.

    +find_all_xor_linear_trails_with_weight_at_most(min_weight, max_weight, fixed_values=[], solver_name='Z3_EXT') +

    Return a list of solutions. +By default, the search removes the key schedule, if any.

    The list contains all the XOR linear trails having the weight lying in the interval [min_weight, max_weight].

    INPUT:

    @@ -213,7 +221,7 @@

    Navigation

  • min_weightinteger; the weight from which to start the search

  • max_weightinteger; the weight at which the search stops

  • fixed_valueslist (default: []); they can be created using set_fixed_variables method

  • -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • See also

    @@ -222,25 +230,31 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: smt = SmtXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trails = smt.find_all_xor_linear_trails_with_weight_at_most(2, 3, fixed_values=[plaintext])
    +sage: trails = smt.find_all_xor_linear_trails_with_weight_at_most(0, 2) # long
     sage: len(trails)
    -11
    +187
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: smt = SmtXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trails = smt.find_all_xor_linear_trails_with_weight_at_most(0, 3, fixed_values=[key])
    +sage: len(trails)
    +73
     
    -find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='z3')
    +find_lowest_weight_xor_linear_trail(fixed_values=[], solver_name='Z3_EXT')

    Return the solution representing a XOR LINEAR trail with the lowest possible weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    Note

    @@ -250,7 +264,7 @@

    Navigation

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    See also

    @@ -259,31 +273,37 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: smt = SmtXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: trail = smt.find_lowest_weight_xor_linear_trail(fixed_values=[plaintext])
    +sage: trail = smt.find_lowest_weight_xor_linear_trail()
     sage: trail['total_weight']
     2.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=4)
    +sage: smt = SmtXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(32)), [0] * 32)
    +sage: trail = smt.find_lowest_weight_xor_linear_trail(fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    -find_one_xor_linear_trail(fixed_values=[], solver_name='z3')
    +find_one_xor_linear_trail(fixed_values=[], solver_name='Z3_EXT')

    Return the solution representing a XOR linear trail. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    The solution probability is almost always lower than the one of a random guess of the longest input.

    INPUT:

    • fixed_valueslist (default: []); they can be created using set_fixed_variables method

    • -
    • solver_namestring (default: z3); the name of the solver

    • +
    • solver_namestring (default: Z3_EXT); the name of the solver

    See also

    @@ -292,31 +312,34 @@

    Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list
     sage: speck = SpeckBlockCipher(number_of_rounds=4)
     sage: smt = SmtXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=integer_to_bit_list(0, 32, 'big'))
    -sage: smt.find_one_xor_linear_trail(fixed_values=[plaintext]) #random
    +sage: smt.find_one_xor_linear_trail() #random
     {'cipher_id': 'speck_p32_k64_o32_r4',
      'model_type': 'xor_linear',
    - 'solver_name': 'z3',
    + 'solver_name': 'Z3_EXT',
      'solving_time_seconds': 0.06,
      'memory_megabytes': 19.65,
      ...
      'total_weight': 67,
      'building_time_seconds': 0.003168344497680664}
    +
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)
    +sage: smt = SmtXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(64)), [0] * 64)
    +sage: smt.find_one_xor_linear_trail(fixed_values=[key]) #random
     
    -find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='z3')
    +find_one_xor_linear_trail_with_fixed_weight(fixed_weight, fixed_values=[], solver_name='Z3_EXT')

    Return the solution representing a XOR linear trail whose weight is fixed_weight. +By default, the search removes the key schedule, if any. By default, the weight corresponds to the negative base-2 logarithm of the correlation of the trail.

    INPUT:

      @@ -330,18 +353,23 @@

      Navigation

    EXAMPLES:

    sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    -sage: from claasp.cipher_modules.models.utils import set_fixed_variables
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
     sage: speck = SpeckBlockCipher(number_of_rounds=3)
     sage: smt = SmtXorLinearModel(speck)
    -sage: plaintext = set_fixed_variables(
    -....:         component_id='plaintext',
    -....:         constraint_type='not_equal',
    -....:         bit_positions=range(32),
    -....:         bit_values=(0,)*32)
    -sage: result = smt.find_one_xor_linear_trail_with_fixed_weight(7, fixed_values=[plaintext])
    -sage: result['total_weight']
    +sage: trail = smt.find_one_xor_linear_trail_with_fixed_weight(7)
    +sage: trail['total_weight']
     7.0
    +
    +# including the key schedule in the model
    +sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_linear_model import SmtXorLinearModel
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: from claasp.cipher_modules.models.utils import set_fixed_variables
    +sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=4)
    +sage: smt = SmtXorLinearModel(speck)
    +sage: key = set_fixed_variables('key', 'not_equal', list(range(16)), [0] * 16)
    +sage: trail = smt.find_one_xor_linear_trail_with_fixed_weight(3, fixed_values=[key])
    +sage: trail['total_weight']
    +3.0
     
    @@ -435,7 +463,7 @@

    Navigation

    -solve(model_type, solver_name='z3')
    +solve(model_type, solver_name='Z3_EXT')

    Return the solution of the model using the solver_name SMT solver.

    INPUT:

      @@ -446,7 +474,7 @@

      Navigation

    • 'xor_linear'

    -
  • solver_namestring (default: z3); the name of the solver

  • +
  • solver_namestring (default: Z3_EXT); the name of the solver

  • See also

    @@ -460,7 +488,7 @@

    Navigation

    sage: smt.solve('xor_differential') # random {'cipher_id': 'speck_p32_k64_o32_r4', 'model_type': 'xor_differential', - 'solver_name': 'z3', + 'solver_name': 'Z3_EXT', 'solving_time_seconds': 0.0, 'memory_megabytes': 0.09, 'components_values': {}, @@ -507,13 +535,13 @@

    Navigation

    Previous topic

    -

    Smt cipher model

    +

    Solvers

    Next topic

    -

    Utils

    +

    Smt cipher model

    This Page

    @@ -531,7 +559,7 @@

    Quick search

    - +
    @@ -546,10 +574,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -557,7 +585,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/smt/solvers.html b/docs/build/html/cipher_modules/models/smt/solvers.html new file mode 100644 index 00000000..6e997a8b --- /dev/null +++ b/docs/build/html/cipher_modules/models/smt/solvers.html @@ -0,0 +1,239 @@ + + + + + + + + + Solvers — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    + +
    +

    Solvers

    +
    +

    Available SMT solvers

    +

    In this file, all the available SMT solvers are listed. They are only external.

    +

External SMT solvers need to be installed on the system if you want a bare-metal installation, since they are called using a subprocess. If you use a Docker container running the default image for the library, no further action is needed.
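As a short, hedged usage sketch (reusing the Speck example from the SMT model pages), the listed names are consumed through the solver_name argument of the solving methods:

sage: from claasp.cipher_modules.models.smt.smt_models.smt_xor_differential_model import SmtXorDifferentialModel
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: smt = SmtXorDifferentialModel(speck)
sage: # 'Z3_EXT' is the default documented elsewhere; other names from this page are selected the same way
sage: smt.find_one_xor_differential_trail(solver_name='Z3_EXT')  # random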

    +
    +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/cipher_modules/models/smt/utils/utils.html b/docs/build/html/cipher_modules/models/smt/utils/utils.html index ec2158a8..fe96012c 100644 --- a/docs/build/html/cipher_modules/models/smt/utils/utils.html +++ b/docs/build/html/cipher_modules/models/smt/utils/utils.html @@ -1,23 +1,24 @@ - + - Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Utils

    +

    Utils

    get_component_hex_value(component, out_suffix, variable2value)
    @@ -262,13 +263,13 @@

    Navigation

    Next topic

    -

    Milp model

    +

    Algebraic model

    This Page

    @@ -286,7 +287,7 @@

    Quick search

    - +
    @@ -301,10 +302,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -312,7 +313,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/models/utils.html b/docs/build/html/cipher_modules/models/utils.html index f6072bea..0838f979 100644 --- a/docs/build/html/cipher_modules/models/utils.html +++ b/docs/build/html/cipher_modules/models/utils.html @@ -1,23 +1,24 @@ - + - Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Utils — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Utils

    +

    Utils

    add_arcs(arcs, component, curr_input_bit_ids, input_bit_size, intermediate_output_arcs, previous_output_bit_ids)
    @@ -64,7 +65,7 @@

    Navigation

    -convert_solver_solution_to_dictionary(cipher_id, model_type, solver_name, solve_time, memory, components_values, total_weight)
    +convert_solver_solution_to_dictionary(cipher, model_type, solver_name, solve_time, memory, components_values, total_weight)

    Return a dictionary that represents the solution obtained from the solver.

    INPUT:
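A minimal sketch of the updated signature, in which the first argument is now the cipher object itself rather than its id (the numeric values below are placeholders, not real measurements):

sage: from claasp.cipher_modules.models.utils import convert_solver_solution_to_dictionary
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=4)
sage: # placeholder time/memory/weight values; components_values left empty for brevity
sage: solution = convert_solver_solution_to_dictionary(speck, 'xor_differential', 'Z3_EXT', 0.0, 0.09, {}, 9)  # doctest: +SKIP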

    @@ -604,10 +605,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -615,7 +616,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/neural_network_tests.html b/docs/build/html/cipher_modules/neural_network_tests.html index 0f4bd94b..e2cc3646 100644 --- a/docs/build/html/cipher_modules/neural_network_tests.html +++ b/docs/build/html/cipher_modules/neural_network_tests.html @@ -1,23 +1,24 @@ - + - Neural network tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Neural network tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Neural network tests

    +

    Neural network tests

    @@ -68,13 +69,13 @@

    Neural network tests

    This Page

    @@ -92,7 +93,7 @@

    Quick search

    - +
    @@ -107,10 +108,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -118,7 +119,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/report.html b/docs/build/html/cipher_modules/report.html new file mode 100644 index 00000000..0dfc4d3f --- /dev/null +++ b/docs/build/html/cipher_modules/report.html @@ -0,0 +1,274 @@ + + + + + + + + + Report — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    + +
    +

    Report

    +
    +
    +class Report(test_report)
    +

    Bases: object

    +
    +
    +clean_reports(output_dir='/home/runner/_work/claasp/claasp/docs/test_reports')
    +
    + +
    +
    +save_as_DataFrame(output_dir='/home/runner/_work/claasp/claasp/docs/test_reports', fixed_input=None, fixed_output=None, fixed_test=None)
    +
    + +
    +
    +save_as_image(show_as_hex=False, test_name=None, fixed_input=None, fixed_output=None, fixed_input_difference=None, word_size=1, state_size=1, key_state_size=1, output_directory='/home/runner/_work/claasp/claasp/docs/test_reports', verbose=False, show_word_permutation=False, show_var_shift=False, show_var_rotate=False, show_theta_xoodoo=False, show_theta_keccak=False, show_shift_rows=False, show_sigma=False, show_reverse=False, show_permuation=False, show_multi_input_non_linear_logical_operator=False, show_modular=False, show_modsub=False, show_constant=False, show_rot=False, show_sbox=False, show_mix_column=False, show_shift=False, show_linear_layer=False, show_xor=False, show_modadd=False, show_and=False, show_or=False, show_not=False, show_plaintext=True, show_key=True, show_intermediate_output=True, show_cipher_output=True, show_input=True, show_output=True)
    +

    Prints the graphical representation of the Report.

    +

    INPUT:

    +

word_size – integer: the word_size to be used for the trail representation
state_size – integer: the state_size to be used for the trail representation
key_state_size – integer: the key_state_size to be used for the trail representation
output_directory – string: the directory in which to store the reports
verbose – bool: determines whether to print a verbose output or not
show_* – bool: boolean value to determine whether to display each specific component when visualizing a trail

    +

    EXAMPLES:

    +
    +

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.report import Report
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: avalanche_test_results = speck.diffusion_tests()
sage: report = Report(speck, avalanche_test_results)
sage: report.save_as_image()

    +
    +
    + +
    +
    +save_as_json(output_dir='/home/runner/_work/claasp/claasp/docs/test_reports', fixed_input=None, fixed_output=None, fixed_test=None)
    +
    + +
    +
    +save_as_latex_table(output_dir='/home/runner/_work/claasp/claasp/docs/test_reports', fixed_input=None, fixed_output=None, fixed_test=None)
    +
    + +
    +
    +show(show_as_hex=False, test_name=None, fixed_input='plaintext', fixed_output='round_output', fixed_input_difference='average', word_size=1, state_size=1, key_state_size=1, verbose=False, show_word_permutation=False, show_var_shift=False, show_var_rotate=False, show_theta_xoodoo=False, show_theta_keccak=False, show_shift_rows=False, show_sigma=False, show_reverse=False, show_permuation=False, show_multi_input_non_linear_logical_operator=False, show_modular=False, show_modsub=False, show_constant=False, show_rot=False, show_sbox=False, show_mix_column=False, show_shift=False, show_linear_layer=False, show_xor=False, show_modadd=False, show_and=False, show_or=False, show_not=False, show_plaintext=True, show_key=True, show_intermediate_output=True, show_cipher_output=True, show_input=True, show_output=True)
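Presumably, continuing the save_as_image example above, the same report can also be displayed interactively with the defaults of show():

sage: report.show()  # doctest: +SKIP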
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/cipher_modules/statistical_tests/dataset_generator.html b/docs/build/html/cipher_modules/statistical_tests/dataset_generator.html index 95bdfafb..d7cabec9 100644 --- a/docs/build/html/cipher_modules/statistical_tests/dataset_generator.html +++ b/docs/build/html/cipher_modules/statistical_tests/dataset_generator.html @@ -1,23 +1,24 @@ - + - Dataset generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Dataset generator — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Dataset generator

    +

    Dataset generator

    class DatasetGenerator(cipher)
    @@ -264,7 +265,7 @@

    Navigation

    class DatasetType(value)
    -

    Bases: enum.Enum

    +

    Bases: Enum

    An enumeration.

    @@ -319,13 +320,13 @@

    Navigation

    This Page

    @@ -343,7 +344,7 @@

    Quick search

    - +
    @@ -358,10 +359,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -369,7 +370,7 @@

    Navigation

    - + diff --git a/docs/build/html/cipher_modules/statistical_tests/dieharder_statistical_tests.html b/docs/build/html/cipher_modules/statistical_tests/dieharder_statistical_tests.html index eaaa31ca..2f93486d 100644 --- a/docs/build/html/cipher_modules/statistical_tests/dieharder_statistical_tests.html +++ b/docs/build/html/cipher_modules/statistical_tests/dieharder_statistical_tests.html @@ -1,23 +1,24 @@ - + - Dieharder statistical tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Dieharder statistical tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,381 +57,41 @@

    Navigation

    -

    Dieharder statistical tests

    +

    Dieharder statistical tests

    class DieharderTests(cipher)

    Bases: object

    -
    -static _generate_chart_all(report_dict_list)
    -

    Generate the corresponding chart based on the parsed report dictionary.

    -

    INPUT:

    -
      -
    • report_dict – the parsed result in a dictionary format

    • -
    -

    OUTPUT:

    -
      -
    • save the chart with filename -f’dieharder_{report_dict[“data_type”]}_{report_dict[“cipher_name”]}_round_{report_dict[“round”]}.png’

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: result = DieharderTests.run_dieharder_statistical_tests_tool_interactively( # doctest: +SKIP
    -....:     f'claasp/cipher_modules/statistical_tests/input_data_example', # doctest: +SKIP
    -....: ) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: dict = DieharderTests.parse_report(f'dieharder_test_output.txt') # doctest: +SKIP
    -Parsing dieharder_test_output.txt is in progress.
    -Parsing dieharder_test_output.txt is finished.
    -
    -sage: dict['data_type'] = 'random' # doctest: +SKIP
    -sage: dict['cipher_name'] = 'toy_cipher' # doctest: +SKIP
    -sage: dict['round'] = 1 # doctest: +SKIP
    -sage: dict['rounds'] = 1 # doctest: +SKIP
    -sage: dict_list = [dict] # doctest: +SKIP
    -sage: DieharderTests._generate_chart_all(dict_list) # doctest: +SKIP
    -Drawing chart for all rounds is in progress.
    -Drawing chart for all rounds is in finished. Please find the chart in file dieharder_random_toy_cipher.png.
    -
    -
    -
    - -
    -
    -static _generate_chart_round(report_dict)
    -

    Generate the corresponding chart based on the parsed report dictionary.

    -

    INPUT:

    -
      -
    • report_dict – the parsed result in a dictionary format

    • -
    -

    OUTPUT:

    -
      -
    • save the chart with filename -f’dieharder_{report_dict[“data_type”]}_{report_dict[“cipher_name”]}_round_{report_dict[“round”]}.png’

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: result = DieharderTests.run_dieharder_statistical_tests_tool_interactively( # doctest: +SKIP
    -....:     f'claasp/cipher_modules/statistical_tests/input_data_example', # doctest: +SKIP
    -....: ) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: dict = DieharderTests.parse_report(f'dieharder_test_output.txt') # doctest: +SKIP
    -Parsing dieharder_test_output.txt is in progress.
    -Parsing dieharder_test_output.txt is finished.
    -
    -sage: dict['data_type'] = 'random' # doctest: +SKIP
    -sage: dict['data_type'] = 'random' # doctest: +SKIP
    -sage: dict['cipher_name'] = 'toy_cipher' # doctest: +SKIP
    -sage: dict['round'] = 1 # doctest: +SKIP
    -sage: dict['rounds'] = 1 # doctest: +SKIP
    -sage: DieharderTests._generate_chart_round(dict) # doctest: +SKIP
    -Drawing round 1 is in progress.
    -Drawing round 1 is finished. Please find the chart in file dieharder_random_toy_cipher_round_1.png.
    -
    -
    -
    - -
    -
    -static parse_report(report_filename)
    -

    Parse the dieharder statistical tests report. It will return the parsed result in a dictionary format.

    -

    INPUT:

    -
      -
    • report_filename – the filename of the report you need to parse

    • -
    -

    OUTPUT:

    -
      -
    • report_dict – return the parsed result in a dictionary format

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: result = DieharderTests.run_dieharder_statistical_tests_tool_interactively( # doctest: +SKIP
    -....:     f'claasp/cipher_modules/statistical_tests/input_data_example', # doctest: +SKIP
    -....: ) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -
    -sage: dict = DieharderTests.parse_report(f'dieharder_test_output.txt') # doctest: +SKIP
    -Parsing dieharder_test_output.txt is in progress.
    -Parsing dieharder_test_output.txt is finished.
    -
    -
    -
    - -
    -
    -run_CBC_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8192, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the CBC test.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, inputs=[key, plaintext], -input_index=0 means it will generate the key avalanche dataset. if input_index=1 means it will generate the -plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should be -passed to the statistical test tool.

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sampleinteger (default: 8192); how many blocks should be generated in -one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); the folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • -
    -

    OUTPUT:

    -
      -
    • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report generated under the folder dieharder_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
    -sage: result = F.run_CBC_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -...
    -
    -
    -
    - -
    -
    -run_avalanche_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the avalanche test.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0). If set to 0, means run to the last round

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); the folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • -
    -

    OUTPUT:

    -
      -
    • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report under the dieharder_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
    -sage: result = F.run_avalanche_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -...
    -
    -
    -
    - -
    -
    -run_correlation_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8128, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the correlation test.

    +
    +dieharder_statistical_tests(test_type, bits_in_one_sequence_dieharder='default', number_of_sequences_dieharder='default', input_index=0, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', dieharder_test_option=None)
    +

    Run the Dieharder statistical tests.

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, inputs=[key, plaintext], -input_index=0 means it will generate the key avalanche dataset. if input_index=1 means it will generate the -plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should be -passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sampleinteger (default: ``8128); how many blocks should be generated in -one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); the folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • +
      +
        +
      • test_type – string describing which test to run

      • +
      • bits_in_one_sequence_dieharder – integer parameter used to run the dieharder tests

      • +
      • number_of_sequences_dieharder – integer parameter used to run the dieharder tests

      • +
      • input_index – cipher input index

      • +
      • round_start – first round to be considered in the cipher

      • +
      • round_end – last round to be considered in the cipher

      • +
• dieharder_report_folder_prefix – prefix for the unparsed dieharder tests output folder

      +

      OUTPUT:

      -
        -
      • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report generated under the folder dieharder_statistics_report folder

      • +
        +
          +
        • The results are going to be saved in a dictionary format compatible with the Report class

        -

        EXAMPLES:

        -
        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
        -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
        -sage: result = F.run_correlation_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
        -...
        -Dieharder Tests Finished!!!
        -...
        -
        -
        -
    - -
    -
    -static run_dieharder_statistical_tests_tool_interactively(input_file)
    -

    Run dieharder tests using the Dieharder library [1]. The result will be in dieharder_test_output.txt.

    -

    [1] https://webhome.phy.duke.edu/~rgb/General/dieharder.php

    -

    INPUT:

    -
      -
    • input_file – file containing the bit streams

    • -
    -

    OUTPUT:

    -
      -
    • the result would be saved as dieharder_test_output.txt

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: result = DieharderTests.run_dieharder_statistical_tests_tool_interactively( # doctest: +SKIP
    -....:     f'claasp/cipher_modules/statistical_tests/input_data_example', # doctest: +SKIP
    -....: ) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -
    -
    -
    - -
    -
    -run_high_density_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, ratio=1, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the high density test.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool.

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • -
      ratio – the ratio of weight 2 (that is, two 1 in the input) as high density inputs, range in [0, 1].

      For example, if ratio = 0.5, means half of the weight 2 high density inputs will be taken as inputs.

      -
      -
      -
    • -
    • round_startinteger (default: 0); the round that the statistical test starts -(includes, index starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends -(excludes, index starts from 0), if set to 0, -means run to the last round.

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); The folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • -
    -

    OUTPUT:

    -
      -
    • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report generated under the folder dieharder_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
    -sage: result = F.run_high_density_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -...
    -
    -
    -
    - -
    -
    -run_low_density_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, ratio=1, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the low density test.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, inputs=[key, plaintext], -input_index=0 means it will generate the key avalanche dataset. if input_index=1 means it will generate the -plaintext avalanche dataset

    • -
    • -
      number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should be

      passed to the statistical test tool

      -
      -
      -
    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • rationumber (default: 1); the ratio of weight 2 (that is, two 1 in the input) as low density -inputs, range in [0, 1]. For example, if ratio = 0.5, means half of the weight 2 low density inputs will be -taken as inputs

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); the folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • -
    -

    OUTPUT:

    -
      -
    • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report generated under the folder dieharder_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
    -sage: result = F.run_low_density_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -...
    -
    -
    -
    - -
    -
    -run_random_dieharder_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8128, round_start=0, round_end=0, dieharder_report_folder_prefix='dieharder_statistics_report', FLAG_CHART=False)
    -

    Run the random test.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, inputs=[key, plaintext], -input_index=0 means it will generate the key avalanche dataset. if input_index=1 means it will generate the -plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should be -passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sample – how many blocks should be generated in one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • dieharder_report_folder_prefixstring (default: dieharder_statistics_report); the folder to save -the generated statistics report from NIST STS

    • -
    • FLAG_CHARTboolean (default: False); draw the chart from dieharder statistical test if set to -True

    • -
    -

    OUTPUT:

    -
      -
    • dieharder_report_dicts – Dictionary-structure result parsed from dieharder statistical report. One could -also see the corresponding report generated under the folder dieharder_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
    -sage: F = DieharderTests(SpeckBlockCipher(number_of_rounds=3)) # doctest: +SKIP
    -sage: result = F.run_random_dieharder_statistics_test(0, 5, 5, round_end=1) # long time # doctest: +SKIP
    -...
    -Dieharder Tests Finished!!!
    -...
    -
    -
    +
    +

    EXAMPLE:

    +
    +

+from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
+from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+speck = SpeckBlockCipher(number_of_rounds=5)
+dieharder_tests = DieharderTests(speck)
+dieharder_avalanche_test_results = dieharder_tests.dieharder_statistical_tests('avalanche')

    +
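A slightly more explicit call of the new interface is sketched below as a non-authoritative illustration. It uses only parameters listed in the signature above; the reading of input_index (0 selecting the key avalanche dataset, 1 the plaintext one) is taken from the per-test documentation removed in this patch, and the concrete values are illustrative rather than recommended settings.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.statistical_tests.dieharder_statistical_tests import DieharderTests
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: dieharder_tests = DieharderTests(speck)
sage: results = dieharder_tests.dieharder_statistical_tests(  # long time # doctest: +SKIP
....:     'avalanche',                 # test_type
....:     input_index=1,               # 1 -> plaintext avalanche dataset (0 -> key), per the earlier docs
....:     round_start=0,               # first round considered
....:     round_end=5,                 # last round considered (illustrative value)
....:     dieharder_report_folder_prefix='dieharder_statistics_report',
....: )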
diff --git a/docs/build/html/cipher_modules/statistical_tests/input_data_example.html b/docs/build/html/cipher_modules/statistical_tests/input_data_example.html
index 54f16c49..acbedaf7 100644
--- a/docs/build/html/cipher_modules/statistical_tests/input_data_example.html
+++ b/docs/build/html/cipher_modules/statistical_tests/input_data_example.html
-Input data example — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Input data example — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Input data example

diff --git a/docs/build/html/cipher_modules/statistical_tests/nist_statistical_tests.html b/docs/build/html/cipher_modules/statistical_tests/nist_statistical_tests.html
index 6a6df06a..dc2a39ac 100644
--- a/docs/build/html/cipher_modules/statistical_tests/nist_statistical_tests.html
+++ b/docs/build/html/cipher_modules/statistical_tests/nist_statistical_tests.html
-Nist statistical tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Nist statistical tests — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Nist statistical tests

-class StatisticalTests(cipher)
+class NISTStatisticalTests(cipher)

    Bases: object

    -
    -static _generate_chart_all(report_dict_list, report_folder='')
    -

    Generate the corresponding chart based on the list of parsed report dictionary for all rounds.

    +
    +nist_statistical_tests(test_type, bits_in_one_sequence='default', number_of_sequences='default', input_index=0, round_start=0, round_end=0, nist_report_folder_prefix='nist_statistics_report', statistical_test_option_list='100000000000000')
    +

    Run the nist statistical tests.

    INPUT:

    -
      -
    • report_dict_listlist; the list of the parsed result in a dictionary format for all rounds

    • +
      +
        +
      • test_type – string describing which test to run

      • +
      • bits_in_one_sequence – integer parameter used to run the nist tests

      • +
      • number_of_sequences – integer parameter used to run the nist tests

      • +
      • input_index – cipher input index

      • +
      • round_start – first round to be considered in the cipher

      • +
      • round_end – last round to be considered in the cipher

      • +
• nist_report_folder_prefix – prefix for the unparsed nist tests output folder

      +

      OUTPUT:

      -
        -
      • save the chart with filename f’nist_{data_type}_{cipher_name}.png’

      • +
        +
          +
        • The results are going to be saved in a dictionary format compatible with the Report class

        -

        EXAMPLES:

        -
        sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
        -sage: dict = StatisticalTests.parse_report(f'claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt')
        -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is in progress.
        -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is finished.
        -
        -sage: dict['data_type'] = 'random'
        -sage: dict['cipher_name'] = 'toy_cipher'
        -sage: dict['round'] = 1
        -sage: dict['rounds'] = 1
        -sage: dict_list = [dict]
        -sage: StatisticalTests._generate_chart_all(dict_list)
        -Drawing chart for all rounds is in progress.
        -Drawing chart for all rounds is in finished.
        -
        -
        -
    - -
    -
    -generate_chart_for_all_rounds(flag_chart, sts_report_dicts)
    -
    - -
    -
    -static _generate_chart_round(report_dict, report_folder='')
    -

    Generate the corresponding chart based on the parsed report dictionary.

    -

    INPUT:

    -
      -
    • report_dictdictionary; the parsed result in a dictionary format

    • -
    -

    OUTPUT:

    -
      -
    • save the chart with filename -f’nist_{report_dict[“data_type”]}_{report_dict[“cipher_name”]}_round_{report_dict[“round”]}.png’

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: dict = StatisticalTests.parse_report(f'claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt')
    -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is in progress.
    -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is finished.
    -
    -sage: dict['data_type'] = 'random'
    -sage: dict['cipher_name'] = 'toy_cipher'
    -sage: dict['round'] = 1
    -sage: dict['rounds'] = 1
    -sage: StatisticalTests._generate_chart_round(dict)
    -Drawing round 1 is in progress.
    -Drawing round 1 is finished.
    -
    -
    -
    - -
    -
    -static parse_report(report_filename)
    -

    Parse the nist statistical tests report. It will return the parsed result in a dictionary format.

    -

    INPUT:

    -
      -
    • report_filenamestr; the filename of the report you need to parse

    • -
    -

    OUTPUT:

    -
      -
    • report_dict – return the parsed result in a dictionary format

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: dict = StatisticalTests.parse_report(f'claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt')
    -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is in progress.
    -Parsing claasp/cipher_modules/statistical_tests/finalAnalysisReportExample.txt is finished.
    -
    -
    -
    - -
    -
    -run_CBC_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8192, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the CBC test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sampleinteger (default: 8192); how many blocks should be generated in -one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); the folder to save the generated -statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_CBC_nist_statistics_test(0, 2, 2, round_end=2) # long time
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    -
    - -
    -
    -run_avalanche_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the avalanche test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); the folder to save the generated -statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_avalanche_nist_statistics_test(0, 10, 10, round_end=2)
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    -
    - -
    -
    -run_correlation_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8128, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the correlation test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sampleinteger (default: 8128); how many blocks should be generated in -one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_end – the round that the statistical test ends (excludes, index starts from 0), if set to 0, means -run to the last round.

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); the folder to save the generated -statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_correlation_nist_statistics_test(0, 10, 10, round_end=2)
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    -
    - -
    -
    -run_high_density_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, ratio=1, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the high density test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • ratiointeger (default: 1); the ratio of weight 2 (that is, two 1 in the input) as high density -inputs, range in [0, 1]. For example, if ratio = 0.5, means half of the weight 2 high density inputs will be -taken as inputs

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); the folder to save the -generated statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_high_density_nist_statistics_test(0, 10, 10, round_end=2)
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    -
    - -
    -
    -run_low_density_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, ratio=1, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the low density test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • ratiointeger (default: 1); the ratio of weight 2 (that is, two 1 in the input) as low density -inputs, range in [0, 1]. For example, if ratio = 0.5, means half of the weight 2 low density inputs will be -taken as inputs

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); the folder to save the generated -statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_low_density_nist_statistics_test(0, 10, 10, round_end=2)
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    -
    - -
    -
    -static run_nist_statistical_tests_tool_interactively(input_file, bit_stream_length, number_of_bit_streams, input_file_format, statistical_test_option_list='111111111111111')
    -

    Run statistical tests using the NIST test suite [1]. The result will be in experiments folder. -Be aware that the NIST STS suits needed to be installed in /usr/local/bin in the docker image.

    -

    [1] https://csrc.nist.gov/Projects/Random-Bit-Generation/Documentation-and-Software

    -

    INPUT:

    -
      -
    • input_filestr; file containing the bit streams

    • -
    • bit_stream_lengthinteger; bit stream length (See [1])

    • -
    • number_of_bit_streamsinteger; number of bit streams in input_file

    • -
    • input_file_formatinteger; input_file format. Set to 0 to indicate a file containing a binary -string in ASCII, or 1 to indicate a binary file

    • -
    • test_typestr; the type of the test to run

    • -
    • statistical_test_option_liststr (default: 15 * '1'); a binary string of size 15. This string is -used to specify a set of statistical tests we want to run (See [1])

    • -
    -

    OUTPUT:

    -
      -
    • The result of the NIST statistical tests is in file test_reports/statistical_tests/experiments/AlgorithmTesting/finalAnalysisReport.txt

    • -
    -

    EXAMPLES:

    -
    sage: import os
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: if not os.path.exists(f'test_reports/statistical_tests/experiments'):
    -....:     os.makedirs(f'test_reports/statistical_tests/experiments')
    -sage: result = StatisticalTests.run_nist_statistical_tests_tool_interactively(
    -....:     f'claasp/cipher_modules/statistical_tests/input_data_example',
    -....:     10000, 10, 1)
    -     Statistical Testing In Progress.........
    -     Statistical Testing Complete!!!!!!!!!!!!
    -
    -sage: result
    -True
    -
    -
    -
    - -
    -
    -run_random_nist_statistics_test(input_index, number_of_samples_in_one_line, number_of_lines, number_of_blocks_in_one_sample=8128, round_start=0, round_end=0, nist_sts_report_folder_prefix='test_reports/statistical_tests/nist_statistics_report', flag_chart=False)
    -

    Run the random test using NIST statistical tools.

    -

    INPUT:

    -
      -
    • input_indexinteger; the index of inputs to generate testing data. For example, -inputs=[key, plaintext], input_index=0 means it will generate the key avalanche dataset. if input_index=1 -means it will generate the plaintext avalanche dataset

    • -
    • number_of_samples_in_one_lineinteger; how many testing data should be generated in one line should -be passed to the statistical test tool

    • -
    • number_of_linesinteger; how many lines should be passed to the statistical test tool

    • -
    • number_of_blocks_in_one_sampleinteger (default: 8128); how many blocks should be generated in -one test sequence

    • -
    • round_startinteger (default: 0); the round that the statistical test starts (includes, index -starts from 0)

    • -
    • round_endinteger (default: 0); the round that the statistical test ends (excludes, index starts -from 0), if set to 0, means run to the last round

    • -
    • nist_sts_report_folder_prefixstring -(default: test_reports/statistical_tests/nist_statistics_report); The folder to save the generated -statistics report from NIST STS

    • -
    • flag_chartboolean (default: False); draw the chart from nist statistical test if set to True

    • -
    -

    OUTPUT:

    -
      -
    • nist_sts_report_dicts – Dictionary-structure result parsed from nist statistical report. One could also -see the corresponding report generated under the folder nist_statistics_report folder

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import StatisticalTests
    -sage: F = StatisticalTests(SpeckBlockCipher(number_of_rounds=3))
    -sage: result = F.run_random_nist_statistics_test(0, 10, 10, round_end=2)
    -     Statistical Testing In Progress.........
    -...
    -Finished.
    -
    -
    +
    +

    EXAMPLE:

    +
    +

+from claasp.cipher_modules.statistical_tests.nist_statistical_tests import NISTStatisticalTests
+from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+speck = SpeckBlockCipher(number_of_rounds=5)
+nist_tests = NISTStatisticalTests(speck)
+nist_avalanche_test_results = nist_tests.nist_statistical_tests('avalanche')

    +
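As with the Dieharder interface, a more explicit call is sketched below as a non-authoritative illustration. It relies only on parameters shown in the signature above; the interpretation of statistical_test_option_list as a 15-character mask selecting the NIST sub-tests follows the description retained from the earlier tool-level documentation, and the concrete values are illustrative only.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.statistical_tests.nist_statistical_tests import NISTStatisticalTests
sage: speck = SpeckBlockCipher(number_of_rounds=5)
sage: nist_tests = NISTStatisticalTests(speck)
sage: results = nist_tests.nist_statistical_tests(  # long time # doctest: +SKIP
....:     'avalanche',                              # test_type
....:     input_index=0,                            # 0 -> key avalanche dataset, 1 -> plaintext
....:     round_start=0,
....:     round_end=5,
....:     statistical_test_option_list='1' * 15,    # enable all 15 NIST sub-tests
....: )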
diff --git a/docs/build/html/cipher_modules/tester.html b/docs/build/html/cipher_modules/tester.html
index 4ac59b30..bf7e377a 100644
--- a/docs/build/html/cipher_modules/tester.html
+++ b/docs/build/html/cipher_modules/tester.html
-Tester — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Tester — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Tester

    test_against_reference_code(cipher, number_of_tests=5)
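A minimal usage sketch, assuming the function is importable from claasp.cipher_modules.tester (the module documented on this page) and that the chosen cipher ships with reference code to compare against; the call below is illustrative only.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher  # doctest: +SKIP
sage: from claasp.cipher_modules.tester import test_against_reference_code  # assumed import path # doctest: +SKIP
sage: speck = SpeckBlockCipher(number_of_rounds=5)  # doctest: +SKIP
sage: test_against_reference_code(speck, number_of_tests=5)  # doctest: +SKIP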
    @@ -78,13 +79,13 @@

diff --git a/docs/build/html/ciphers/block_ciphers/aes_block_cipher.html b/docs/build/html/ciphers/block_ciphers/aes_block_cipher.html
index feaa1c35..c72d0b25 100644
--- a/docs/build/html/ciphers/block_ciphers/aes_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/aes_block_cipher.html
-Aes block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Aes block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


Aes block cipher

    class AESBlockCipher(number_of_rounds=10, word_size=8, state_size=4)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Return a cipher object of AES Block Cipher.

    INPUT:

      @@ -224,94 +225,11 @@


      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -350,182 +268,11 @@


    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    -
    -
    convert_to_compound_xor_cipher()
    @@ -546,6 +293,11 @@


    create_mix_column_components(round_number, shift_row_components, word_size)
    +
    +
    +create_networx_graph_from_input_ids()
    +
    +
    create_rotate_component(remaining_xors, round_number, word_size)
    @@ -571,6 +323,11 @@


    create_shift_row_components(sboxes_components, word_size)
    +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    +
    create_xor_components(constant, key_sboxes_components, remaining_xors, xor1, round_number)
    @@ -606,53 +363,6 @@


    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
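    As a concrete reading of these thresholds (numbers chosen purely for illustration, not taken from the source): with block_bit_size = 16 and the default avalanche_weight_criterion_threshold = 0.01, the avalanche weight criterion requires 16/2 - 0.01 <= d <= 16/2 + 0.01, i.e. 7.99 <= d <= 8.01, for the round under analysis; with the default avalanche_dependence_criterion_threshold = 0, the dependence criterion requires d to equal the block bit size exactly.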
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    INPUT:

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    • bit_based – boolean (default: False)

    EXAMPLES:

    sage: import numpy as np
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
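    The doctest above is truncated by the surrounding diff; a plausible continuation, assuming random test vectors and making no claim about the exact return format, is:

    sage: np.random.seed(0)
    sage: plaintexts = np.random.randint(256, size=(4, 2), dtype=np.uint8)  # 32-bit block -> 4 byte rows, 2 inputs
    sage: keys = np.random.randint(256, size=(8, 2), dtype=np.uint8)        # 64-bit key -> 8 byte rows, 2 inputs
    sage: ciphertexts = speck.evaluate_vectorized([plaintexts, keys])       # doctest: +SKIP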
    property file_name

    find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher([True, False], verbose=False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")

    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
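    Not part of the original docstring: one way to write the returned LaTeX string to a file for later compilation (the file name is arbitrary):

    sage: with open("avalanche_heatmaps.tex", "w") as f:  # hypothetical output path
    ....:     _ = f.write(h)
    sage: # the .tex file can then be compiled outside Sage, e.g. with pdflatex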
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    make_file_name()

    neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random

    neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained

    • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
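    Both examples above are commented out in the source because the tests are slow; a minimal uncommented sketch with deliberately tiny, illustrative parameters (the values are assumptions, not recommended settings):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: cipher = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=3)
    sage: results = cipher.neural_network_differential_distinguisher_tests(nb_samples=10, number_of_epochs=1)  # doctest: +SKIP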
    property number_of_rounds
    print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the component analysis test.

    INPUT:

    • component_analysis_results – list; results of the component analysis method

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: fig = aes.print_component_analysis_as_radar_charts(result)
    sage: fig.show() # doctest: +SKIP
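    As a follow-up not in the original docstring, and assuming the returned object is a standard matplotlib Figure, the charts can be saved to disk instead of shown (the file name is arbitrary):

    sage: fig.savefig("aes_component_analysis.png", dpi=150)  # doctest: +SKIP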
    print_evaluation_python_code(verbosity=False)
    property rounds_as_list

    run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
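    A second sketch, not from the source, showing how difference_positions could restrict the search to plaintext differences only for a cipher whose inputs are [plaintext, key]; the flag layout and the tiny epoch count are assumptions for illustration:

    sage: cipher.run_autond_pipeline(difference_positions=[True, False], number_of_epochs=1)  # doctest: +SKIP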
    set_file_name(file_name)
    train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth-depth Gohr's ResNet ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type
    diff --git a/docs/build/html/ciphers/block_ciphers/bea1_block_cipher.html b/docs/build/html/ciphers/block_ciphers/bea1_block_cipher.html
    index c28cc66e..8de7e7d9 100644
    --- a/docs/build/html/ciphers/block_ciphers/bea1_block_cipher.html
    +++ b/docs/build/html/ciphers/block_ciphers/bea1_block_cipher.html

    -    Bea1 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +    Bea1 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Bea1 block cipher

    class BEA1BlockCipher(number_of_rounds=11)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Return a cipher object of BEA-1 Block Cipher.

    INPUT:
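    The constructor's INPUT list is cut off by the surrounding hunk; a minimal instantiation sketch, assuming the import path follows the file location shown in the diff header:

    sage: from claasp.ciphers.block_ciphers.bea1_block_cipher import BEA1BlockCipher  # path assumed from the diff header
    sage: cipher = BEA1BlockCipher(number_of_rounds=11)  # doctest: +SKIP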

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUTS: a dictionary with the following keys:

    • npolynomials – number of polynomials

    • nvariables – number of variables

    • timeout – timeout in seconds

    • pass – whether the algebraic test passes w.r.t. the given timeout

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    sage: d = speck.algebraic_tests(5)  # long time
    sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    ....: {'number_of_variables': [304, 800],
    ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    True

    analyze_cipher(tests_configuration)

    Generate a dictionary with the analysis of the cipher.

    The analysis is related to the following tests:

    • Diffusion Tests

    INPUT:

    • tests_configuration – python dictionary

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    ....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    ....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    ....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    ....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    ....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    sage: analysis = sp.analyze_cipher(tests_configuration)
    sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random

    as_python_dictionary()

    avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

    The inputs considered are plaintext, key, etc.

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: apvs["key"]["round_output"][31][0] # random
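    A short follow-up not in the original docstring, showing one way to inspect the returned structure; the nesting follows the Note above, while the length check is an assumption:

    sage: vector = apvs["key"]["round_output"][31][0]
    sage: len(vector)  # expected to match the round_output size; exact value not asserted here  # doctest: +SKIP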
    cipher_inverse()
    component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher's operations.

    INPUT:

    • None

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: len(result)
    9

    component_from(round_number, index)

    compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    ALGORITHM:

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in $\left[\frac{1}{2} - \text{bias},\ \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    Note

    d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    INPUT:

    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

    See also

    avalanche_probability_vectors() for the returned vectors.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random

    continuous_avalanche_factor(lambda_value, number_of_samples)

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    INPUT:

    • lambda_value – float; threshold value used to express the input difference

    • number_of_samples – integer; number of samples used to compute the continuous avalanche factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2)
    sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    0.0

    continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    INPUT:

    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2) # long time
    sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    True
    continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    INPUT:

    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    OUTPUT:

    • A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=1) # long time
    sage: output = speck_cipher.continuous_diffusion_tests() # long time
    sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    True
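    Not part of the original docstring: the boolean flags can be used to compute only a subset of the metrics; a minimal sketch with illustrative arguments, reusing speck_cipher from the example above:

    sage: output = speck_cipher.continuous_diffusion_tests(is_continuous_avalanche_factor=False,
    ....:                                                  is_continuous_neutrality_measure=False)  # doctest: +SKIP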
    @@ -1687,13 +1164,13 @@

    Navigation

    This Page

    @@ -1711,7 +1188,7 @@

    Quick search

    - +
    @@ -1726,10 +1203,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1737,7 +1214,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/constant_block_cipher.html b/docs/build/html/ciphers/block_ciphers/constant_block_cipher.html index 420449cc..34a98547 100644 --- a/docs/build/html/ciphers/block_ciphers/constant_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/constant_block_cipher.html @@ -1,23 +1,24 @@ - + - Constant block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Constant block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Constant block cipher

    +

    Constant block cipher

    class ConstantBlockCipher(block_bit_size=3, number_of_rounds=3)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Create an instance of ConstantBlockCipher class which will always output the constant value of last round number.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -305,94 +306,11 @@

    Navigation
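    The constructor's INPUT list is cut off by the surrounding hunk; a minimal instantiation sketch, assuming the import path follows the file location shown in the diff header:

    sage: from claasp.ciphers.block_ciphers.constant_block_cipher import ConstantBlockCipher  # path assumed from the diff header
    sage: cipher = ConstantBlockCipher(block_bit_size=3, number_of_rounds=3)  # doctest: +SKIP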

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -431,185 +349,19 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +convert_to_compound_xor_cipher()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_networx_graph_from_input_ids()
    @@ -617,6 +369,11 @@

    Navigation

    create_rounds(block_bit_size, number_of_rounds)
    +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    +
    property current_round
    @@ -647,53 +404,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -738,7 +448,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
@@ -752,11 +462,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
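For readers unfamiliar with the uint8 layout described above, the sketch below shows one plausible way to pack two 32-bit plaintexts into such an array; the byte order and orientation are assumptions to verify against the library, not a documented guarantee.

    import numpy as np

    plaintexts = [0x11223344, 0xAABBCCDD]
    # One column per input, one byte per row (most significant byte first is assumed here).
    columns = [[(p >> shift) & 0xFF for shift in (24, 16, 8, 0)] for p in plaintexts]
    plaintext_array = np.array(columns, dtype=np.uint8).T  # shape (4, 2): n byte rows, m inputs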
    @@ -817,28 +530,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
• nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -874,35 +565,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -921,50 +583,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
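Since the returned string is a full LaTeX document, a simple way to use it (plain-Python sketch; the file name is arbitrary) is to write it to a .tex file and compile it with a LaTeX toolchain:

    # Write the generated heatmap document to disk for compilation with pdflatex.
    with open("avalanche_heatmaps.tex", "w") as latex_file:
        latex_file.write(h)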
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1230,43 +848,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• -
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• -
• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• -
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• -
• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

• -
• diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
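The doctest above is commented out because the run is slow and its output is random; a direct call with small parameters would look like the following sketch (the keyword names are taken from the signature above, and the values are only kept small to bound the runtime):

    # Train small distinguisher networks on a reduced-round Speck instance; the
    # returned dictionary holds one accuracy per round.
    results = speck(number_of_rounds=5).neural_network_differential_distinguisher_tests(
        nb_samples=10, number_of_epochs=1)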
    -
    -
    -
    -
    property number_of_rounds
    @@ -1423,24 +1004,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1575,38 +1138,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
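For instance, restricting the optimizer to plaintext differences only (which the parameter description above notes is also the default, single-key behaviour) could look like the sketch below; the keyword names come from the signature above, and the assumed input order [plaintext, key] should be checked against the cipher's inputs.

    # Hypothetical invocation: allow differences only in the plaintext input and
    # keep the optimizer/training parameters small for a quick run.
    results = cipher.run_autond_pipeline(difference_positions=[True, False],
                                         optimizer_generations=5,
                                         number_of_epochs=1)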

    -
    -
    set_file_name(file_name)
    @@ -1683,70 +1214,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1771,13 +1248,13 @@

    Navigation

    This Page

    @@ -1795,7 +1272,7 @@

    Quick search

    - +
    @@ -1810,10 +1287,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1821,7 +1298,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/des_block_cipher.html b/docs/build/html/ciphers/block_ciphers/des_block_cipher.html index fce22f0c..f54725de 100644 --- a/docs/build/html/ciphers/block_ciphers/des_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/des_block_cipher.html @@ -1,23 +1,24 @@ - + - Des block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Des block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Des block cipher

    +

    Des block cipher

    class DESBlockCipher(number_of_rounds=16, number_of_sboxes=8)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of DES Block Cipher.

    INPUT:

      @@ -223,94 +224,11 @@

      Navigation

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -349,185 +267,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -560,53 +317,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -651,7 +361,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -665,11 +375,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -730,28 +443,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -787,35 +478,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -834,50 +496,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1143,43 +761,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1336,24 +917,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1488,38 +1051,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1596,70 +1127,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1684,13 +1161,13 @@

    Navigation

    This Page

    @@ -1708,7 +1185,7 @@

    Quick search

    - +
    @@ -1723,10 +1200,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1734,7 +1211,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/des_exact_key_length_block_cipher.html b/docs/build/html/ciphers/block_ciphers/des_exact_key_length_block_cipher.html index d1391607..6ae799ac 100644 --- a/docs/build/html/ciphers/block_ciphers/des_exact_key_length_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/des_exact_key_length_block_cipher.html @@ -1,23 +1,24 @@ - + - Des exact key length block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Des exact key length block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    -

    Des exact key length block cipher

    +

    Des exact key length block cipher

    class DESExactKeyLengthBlockCipher(number_of_rounds=16, number_of_sboxes=8)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of DES Block Cipher with exact key length.

    INPUT:

      @@ -226,94 +227,11 @@

      Navigation

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -352,185 +270,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
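    For instance, following the threshold convention used by diffusion_tests below, a 16-bit round output with an avalanche weight threshold of 0.1 satisfies the avalanche criterion only if every input bit difference yields an expected output difference weight d with 7.9 <= d <= 8.1.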

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -563,53 +320,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -654,7 +364,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.

    @@ -668,11 +378,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
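    The example above is cut short by the surrounding hunk. The sketch below illustrates the call pattern implied by the description (one column per sample, one row per byte of each cipher input); the 4-byte and 8-byte row counts are assumptions for this Speck32/64 instance, and the layout of the returned value is not shown here.

        sage: import numpy as np
        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
        sage: samples = 16
        sage: plaintexts = np.random.randint(256, size=(4, samples), dtype=np.uint8)  # 32-bit block = 4 bytes per column
        sage: keys = np.random.randint(256, size=(8, samples), dtype=np.uint8)        # 64-bit key = 8 bytes per column
        sage: outputs = speck.evaluate_vectorized([plaintexts, keys])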
    @@ -733,28 +446,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -790,35 +481,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].

    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -837,50 +499,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria:

      • “avalanche_dependence_vectors”
      • “avalanche_dependence_uniform_vectors”
      • “avalanche_entropy_vectors”
      • “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1146,43 +764,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1339,24 +920,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1491,38 +1054,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions);
      if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The
      optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the
      quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase
      the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: cipher.run_autond_pipeline()
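    A variant of the call above that restricts the optimizer to plaintext differences only, using the difference_positions convention described in the INPUT list (one boolean per cipher input), might look as follows; the reduced generation and epoch counts are illustrative choices to keep the run short, not recommended settings.

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: cipher.run_autond_pipeline(difference_positions=[True, False],
        ....:                            optimizer_generations=5, number_of_epochs=1)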

    -
    -
    set_file_name(file_name)
    @@ -1599,70 +1130,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (which may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: number_of_rounds = 5
        sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
        2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
        Validation accuracy at 5 rounds: 0.9101160168647766
        0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher on the data produced by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, the distinguisher is retrained on successive rounds for as long as the validation accuracy remains statistically significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds
      and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a
      label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a
      custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and
      retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
        sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
        sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)
    -
    -
    -
    property type
    +
    + +
    +
diff --git a/docs/build/html/ciphers/block_ciphers/fancy_block_cipher.html b/docs/build/html/ciphers/block_ciphers/fancy_block_cipher.html
index e3a19333..26dcf928 100644
--- a/docs/build/html/ciphers/block_ciphers/fancy_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/fancy_block_cipher.html
-    Fancy block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Fancy block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Fancy block cipher

    +

    Fancy block cipher

    class FancyBlockCipher(block_bit_size=24, key_bit_size=24, number_of_rounds=20)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object containing the graph representation of the Fancy Block Cipher.

    The Fancy Block Cipher is not meant to be a secure cipher, but was created for testing purposes, and it includes several weaknesses by definition.
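    A minimal instantiation sketch using the default arguments shown in the signature above; the import path is inferred from the documented file and should be checked against the source tree.

        sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
        sage: fancy = FancyBlockCipher(block_bit_size=24, key_bit_size=24, number_of_rounds=20)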

    @@ -240,94 +241,11 @@

    Navigation

    add_xor_component_to_even_round(key_bit_size, round_number, type2_key_schedule_and, type2_key_schedule_xor)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -371,185 +289,24 @@

    Navigation

    collect_input_id_links(type1_sboxes)
    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -582,53 +339,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -673,7 +383,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -687,11 +397,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -752,28 +465,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -809,35 +500,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].

    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -856,50 +518,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria:

      • “avalanche_dependence_vectors”
      • “avalanche_dependence_uniform_vectors”
      • “avalanche_entropy_vectors”
      • “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1165,43 +783,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1358,24 +939,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1510,38 +1073,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run (see also the sketch after the EXAMPLES block below).

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
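    To make the role of difference_positions concrete, the following hedged sketch restricts the optimizer to plaintext differences only on a reduced-round Speck instance (all parameter values are illustrative, not recommended settings):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=5)
    sage: # one boolean per cipher input: allow differences in the plaintext, none in the key
    sage: results = cipher.run_autond_pipeline(difference_positions=[True, False],
    ....:     optimizer_generations=5, training_samples=10**5, testing_samples=10**4,
    ....:     number_of_epochs=1, verbose=False)  # long time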
    set_file_name(file_name)
    @@ -1618,70 +1149,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1706,13 +1183,13 @@

    Navigation

    This Page

    @@ -1730,7 +1207,7 @@

    Quick search

    - +
    @@ -1745,10 +1222,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1756,7 +1233,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/hight_block_cipher.html b/docs/build/html/ciphers/block_ciphers/hight_block_cipher.html index 348a33a4..875c844e 100644 --- a/docs/build/html/ciphers/block_ciphers/hight_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/hight_block_cipher.html @@ -1,23 +1,24 @@ - + - Hight block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Hight block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Hight block cipher

    +

    Hight block cipher

    class HightBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0, sub_keys_zero=False, transformations_flag=True)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the HightBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    @@ -233,94 +234,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -359,185 +277,19 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    (A small numeric sketch of these criteria follows the EXAMPLES block below.)

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
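    As a rough numeric illustration of the criteria described in ALGORITHM above (a minimal sketch, not part of the library API; the probability vector below is made up), the avalanche weight is the sum of the per-bit flip probabilities, the avalanche dependence counts the bits that flip at all, and the dependence uniform counts the bits whose flip probability lies within bias of 1/2:

    sage: apv = [0.0, 0.5, 0.48, 1.0]   # hypothetical per-bit flip probabilities for one round
    sage: avalanche_weight = sum(apv)   # expected Hamming weight of the output difference
    sage: avalanche_dependence = sum(1 for p in apv if p > 0)
    sage: bias = 0.2
    sage: dependence_uniform = sum(1 for p in apv if 0.5 - bias <= p <= 0.5 + bias)
    sage: avalanche_weight, avalanche_dependence, dependence_uniform
    (1.98, 3, 2)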
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
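    A hedged sketch that computes only the continuous avalanche factor by switching off the other metrics via the boolean flags listed above (a reduced-round instance is used to keep the runtime small):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: cipher = speck(number_of_rounds=1)  # long time
    sage: output = cipher.continuous_diffusion_tests(is_continuous_neutrality_measure=False, is_diffusion_factor=False)  # long time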
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +convert_to_compound_xor_cipher()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_networx_graph_from_input_ids()
    @@ -545,6 +297,11 @@

    Navigation

    create_sub_key(sub_key_temp_list, sub_keys_zero)
    +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    +
    property current_round
    @@ -575,53 +332,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
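    Following the Note above, one might walk the returned dictionary like this (key names are taken from the Note and from the example; the indices are illustrative):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: d = speck.diffusion_tests(number_of_samples=100)
    sage: entry = d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][0]
    sage: entry["output_vectors"][0]["vector"]  # random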
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -666,7 +376,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -680,11 +390,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -750,28 +463,6 @@ 

    Navigation

    final_transformation(plaintext_list, whitening_key_list)
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -807,35 +498,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -854,50 +516,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors" and "avalanche_weight_vectors".

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1173,43 +791,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1366,24 +947,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1523,38 +1086,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1631,70 +1162,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
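    To make the data_generator contract concrete, here is a minimal hedged sketch of a generator that returns random (and therefore indistinguishable) data with the expected shapes (one row of X and one label in Y per sample); the sizes are illustrative only:

    sage: import numpy as np
    sage: def random_data_generator(nr, samples):
    ....:     X = np.random.randint(2, size=(samples, 64), dtype=np.uint8)  # one row per sample
    ....:     Y = np.random.randint(2, size=samples, dtype=np.uint8)        # one label per sample
    ....:     return X, Y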
    property type
    @@ -1734,8 +1211,8 @@

    Navigation

    Next topic

    @@ -1758,7 +1235,7 @@

    Quick search

    - +
    @@ -1776,7 +1253,7 @@

    Navigation

    next |
  • - previous |
  • @@ -1784,7 +1261,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/identity_block_cipher.html b/docs/build/html/ciphers/block_ciphers/identity_block_cipher.html index f6f3b2e1..0a4293e8 100644 --- a/docs/build/html/ciphers/block_ciphers/identity_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/identity_block_cipher.html @@ -1,23 +1,24 @@ - + - Identity block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Identity block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Identity block cipher

    +

    Identity block cipher

    class IdentityBlockCipher(block_bit_size=32, key_bit_size=32, number_of_rounds=1)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object containing the graph representation the Identity Block Cipher.

    The Identity Block Cipher encryption returns the message itself, i.e. IdentityBlockCipherEncryption(k,m) = m. @@ -236,94 +237,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -362,185 +280,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -573,53 +330,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -664,7 +374,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -678,11 +388,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
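    A minimal sketch of the documented contract between evaluate and evaluate_vectorized with evaluate_api (the input values below are hypothetical):

    sage: x = [0x12345678, 0x1234567812345678]   # plaintext and key as integers (hypothetical values)
    sage: speck.evaluate(x) == speck.evaluate_vectorized(x, evaluate_api=True)   # expected per the description above
    True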
    @@ -743,28 +456,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose=False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -800,35 +491,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
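    A minimal sketch (not part of the original docstring) of reading the generated report back; it would be run before the os.remove step in the example above. The column layout of the CSV is not specified here, so only generic handling is shown:

    sage: import csv
    sage: with open(f"{tii_dir_path}/{identity.id}_report.csv") as f:   # hypothetical follow-up
    ....:     rows = list(csv.reader(f))
    sage: print(len(rows))  # number of rows written  # random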
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -847,50 +509,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject.
      The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe.
      The default value is equivalent to picking all four criteria:
      - "avalanche_dependence_vectors"
      - "avalanche_dependence_uniform_vectors"
      - "avalanche_entropy_vectors"
      - "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
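    A short, hypothetical follow-up showing how the returned LaTeX string could be written to a file for compilation outside of Sage (the file name is an assumption):

    sage: with open("avalanche_heatmaps.tex", "w") as f:   # hypothetical output file
    ....:     _ = f.write(h)
    sage: # compile it afterwards, e.g. with `pdflatex avalanche_heatmaps.tex`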
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1156,43 +774,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the training of the neural network lasts

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the training of the neural network lasts

    • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
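    A minimal, hypothetical variant of the commented example above, passing a custom list of input differences through the diff parameter (the value 0x400000 is only illustrative; the call is left commented like the original example because of its runtime):

    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10, diff = [0x400000]) # random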
    property number_of_rounds
    @@ -1349,24 +930,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1501,38 +1064,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
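    A small, hypothetical sketch of building a difference_positions list from the cipher's declared inputs (using cipher.inputs here is an assumption; the list only needs one boolean per cipher input):

    sage: positions = [name == "plaintext" for name in cipher.inputs]   # e.g. [True, False]
    sage: cipher.run_autond_pipeline(difference_positions=positions, number_of_epochs=1)   # long time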
    set_file_name(file_name)
    @@ -1609,70 +1140,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth-depth Gohr's RESNet ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
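    A short sketch of writing the generator as a named function instead of a lambda; it assumes only the data_generator contract stated above (accepts a number of rounds and a number of samples, returns X, Y):

    sage: def data_generator(nr, samples):
    ....:     # must return X, Y: X a numpy matrix with one row per sample, Y the labels
    ....:     return get_differential_dataset(cipher, input_differences,
    ....:                                     number_of_rounds=nr, samples=samples)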
    property type
diff --git a/docs/build/html/ciphers/block_ciphers/kasumi_block_cipher.html b/docs/build/html/ciphers/block_ciphers/kasumi_block_cipher.html
index 0c688bd6..18498ff3 100644
--- a/docs/build/html/ciphers/block_ciphers/kasumi_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/kasumi_block_cipher.html
-    Kasumi block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Kasumi block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Kasumi block cipher

    +

    Kasumi block cipher

    class KasumiBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=8)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of Kasumi Block Cipher.

    INPUT:

      @@ -225,94 +226,11 @@

      Navigation

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUTS: a dictionary with the following keys:

    • npolynomials – number of polynomials

    • nvariables – number of variables

    • timeout – timeout in seconds

    • pass – whether the algebraic test passes w.r.t. the given timeout

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    sage: d = speck.algebraic_tests(5)  # long time
    sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    ....: {'number_of_variables': [304, 800],
    ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    True
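    A minimal sketch (dictionary keys taken from the example above) of reading the per-round outcome directly:

    sage: d['test_results']['test_passed']   # one boolean per analysed round
    [False, False]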
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: apvs["key"]["round_output"][31][0] # random
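    A minimal sketch (not part of the original docstring); the dictionary path follows the Note above, and treating each entry as an estimated flip probability in [0, 1] matches the description:

    sage: vec = apvs["key"]["round_output"][31][0]
    sage: all(0 <= p <= 1 for p in vec)
    True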
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round (for example, with bias = 0.2 the admissible flip probability range is [0.3, 0.7]). If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
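    A minimal sketch (dictionary path taken from the example above; treating the entries of the returned vector as 0/1 flags is an assumption):

    sage: vec = d["key"]["round_output"][0][0]["avalanche_dependence_vectors"]
    sage: sum(vec), len(vec)   # flipped output bits vs. total output bits  # random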
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=1) # long time
    sage: output = speck_cipher.continuous_diffusion_tests() # long time
    sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    True
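    A hypothetical variant of the call above, using the boolean flags documented in INPUT to compute only the continuous avalanche factor:

    sage: output = speck_cipher.continuous_diffusion_tests(   # long time
    ....:     is_continuous_neutrality_measure=False, is_diffusion_factor=False)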
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -567,53 +324,6 @@

    Navigation

    derived_key(key)
    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -658,7 +368,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -672,11 +382,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -733,8 +446,8 @@ 

    Navigation

    -
    -fi_function(p, ki_id, ki_positions)
    +
    +fi_function1(ids, ki_id, ki_positions)
    @@ -742,28 +455,6 @@

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -781,12 +472,12 @@

    Navigation

    -fl_function(p, sub_key)
    +fl_function(ids, positions, sub_key)
    -fo_function(p, sub_key)
    +fo_function(ids, positions, sub_key)
    @@ -809,35 +500,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -856,50 +518,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1043,6 +661,11 @@

    Navigation

    +
    +
    +static init_halves()
    +
    +
    property inputs
    @@ -1165,43 +788,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1358,24 +944,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1500,11 +1068,6 @@

    Navigation

    remove_round_component_from_id(round_id, component_id)
    -
    -
    -round_initialization()
    -
    -
    round_key(key, key_der, r)
    @@ -1520,38 +1083,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1628,70 +1159,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
diff --git a/docs/build/html/ciphers/block_ciphers/lblock_block_cipher.html b/docs/build/html/ciphers/block_ciphers/lblock_block_cipher.html
index d9560bf6..03aab667 100644
--- a/docs/build/html/ciphers/block_ciphers/lblock_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/lblock_block_cipher.html
-    Lblock block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Lblock block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Lblock block cipher

    +

    Lblock block cipher

    class LBlockBlockCipher(number_of_rounds=32)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the LBlockBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    @@ -225,94 +226,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

    • avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    A worked numeric check of these thresholds appears after the example below.

    Note

    -

    diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputs – boolean (default: False)

    • +
    • intermediate_output – boolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
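
    The doctest above is truncated in this diff. As a hedged illustration of the input layout described in the INPUT section (one uint8 per byte, one column per input to evaluate), one plausible way to pack two 32-bit plaintexts and two 64-bit keys with numpy is sketched below; the byte order and the final call are assumptions, not part of the documented doctest:

    import numpy as np

    def to_uint8_columns(values, n_bytes):
        # One row per byte (big-endian assumed here), one column per sample, dtype uint8.
        return np.array([list(v.to_bytes(n_bytes, 'big')) for v in values], dtype=np.uint8).T

    plaintexts = to_uint8_columns([0x6574694c, 0x12345678], 4)              # two 32-bit plaintexts
    keys = to_uint8_columns([0x1918111009080100, 0x0f0e0d0c0b0a0908], 8)    # two 64-bit keys
    print(plaintexts.shape, keys.shape)   # (4, 2) (8, 2)

    # Hypothetical call mirroring the documented signature:
    # outputs = speck.evaluate_vectorized([plaintexts, keys])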
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1145,43 +763,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network (see the sketch after the example below)

    • number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
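
    For intuition about the hidden_layers parameter only, the default [32, 32, 32] describes a fully connected network with one Dense layer of 32 neurons per entry. A minimal Keras sketch of such a topology follows; this is an assumption made for illustration (including the input size of 32 features and the sigmoid output), not the library's internal model construction:

    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense

    hidden_layers = [32, 32, 32]   # one entry per hidden layer, as in the default
    model = Sequential()
    model.add(Dense(hidden_layers[0], activation='relu', input_shape=(32,)))   # 32 input features, arbitrary here
    for units in hidden_layers[1:]:
        model.add(Dense(units, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))   # binary distinguisher output
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])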
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1338,24 +919,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1495,38 +1058,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.
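
    Schematically, the loop described above can be pictured as follows; this is illustrative plain Python with stub functions standing in for the evolutionary optimizer and the DBitNET training, not the library implementation:

    import random

    def find_input_difference(difference_positions):
        # Stub for the evolutionary optimizer described above.
        return 0x400000

    def train_dbitnet(input_difference, nr):
        # Stub: returns a made-up validation accuracy that shrinks with the number of rounds.
        return max(0.5, 0.95 - 0.1 * nr + random.uniform(-0.01, 0.01))

    difference = find_input_difference([True, False])
    nr = 1
    while True:
        accuracy = train_dbitnet(difference, nr)
        print(f"round {nr}: validation accuracy {accuracy:.3f}")
        if accuracy <= 0.5 + 0.01:   # no better than random guessing (the tolerance is illustrative)
            break
        nr += 1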

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1603,70 +1134,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using Gohr’s ResNet ([Go2019]) of depth depth, slightly modified (AMSGrad instead of a cyclic learning rate schedule).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    update_key(k, i)
    @@ -1696,13 +1173,13 @@

    Navigation

    This Page

    @@ -1720,7 +1197,7 @@

    Quick search

    - +
    @@ -1735,10 +1212,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1746,7 +1223,7 @@

    Navigation

    diff --git a/docs/build/html/ciphers/block_ciphers/lea_block_cipher.html b/docs/build/html/ciphers/block_ciphers/lea_block_cipher.html
    index 21c4b235..c8438965 100644
    --- a/docs/build/html/ciphers/block_ciphers/lea_block_cipher.html
    +++ b/docs/build/html/ciphers/block_ciphers/lea_block_cipher.html
    - Lea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Lea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Lea block cipher

    +

    Lea block cipher

    class LeaBlockCipher(block_bit_size=128, key_bit_size=192, number_of_rounds=0, reorder_input_and_output=True)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the LeaBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -231,94 +232,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
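
    The probability in question is a per-bit flip probability estimated by sampling. A minimal sketch of such an estimate on a toy 8-bit function (plain Python, for intuition only; it is not the library's implementation):

    import random

    def toy_round(x):
        # Stand-in for one cipher round on an 8-bit state.
        return ((x * 37 + 11) ^ (x >> 3)) & 0xFF

    def flip_probabilities(input_diff, nb_samples=1000):
        counts = [0] * 8
        for _ in range(nb_samples):
            x = random.randrange(256)
            out_diff = toy_round(x) ^ toy_round(x ^ input_diff)
            for i in range(8):
                counts[i] += (out_diff >> i) & 1
        # i-th entry: estimated probability that output bit i flips.
        return [c / nb_samples for c in counts]

    print(flip_probabilities(0x01))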
    -
    -
    -
    -
    cipher_inverse()
    @@ -357,185 +275,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in the interval [1/2 - bias, 1/2 + bias], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
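
    For intuition, the avalanche dependence and the avalanche weight can be read directly off an avalanche probability vector. A small plain-Python sketch, assuming pv is such a vector of per-bit flip probabilities (the values are made up):

    pv = [0.48, 0.52, 0.50, 0.07, 0.91, 0.49, 0.50, 0.51]

    # Avalanche dependence: number of output bits that flip at all for this input difference.
    dependence = sum(1 for p in pv if p > 0)

    # Avalanche weight: expected Hamming weight of the output difference.
    weight = sum(pv)

    print(dependence, round(weight, 2))   # 8 3.98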

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -568,53 +325,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -659,7 +369,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -673,11 +383,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -738,28 +451,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -795,35 +486,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -842,50 +504,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1171,43 +789,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1364,24 +945,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1521,38 +1084,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1629,70 +1160,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1727,13 +1204,13 @@

    Navigation

    This Page

    @@ -1751,7 +1228,7 @@

    Quick search

    - +
    @@ -1766,10 +1243,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1777,7 +1254,7 @@

    Navigation

    diff --git a/docs/build/html/ciphers/block_ciphers/lowmc_block_cipher.html b/docs/build/html/ciphers/block_ciphers/lowmc_block_cipher.html
    index de4ce2e8..7cccf870 100644
    --- a/docs/build/html/ciphers/block_ciphers/lowmc_block_cipher.html
    +++ b/docs/build/html/ciphers/block_ciphers/lowmc_block_cipher.html
    - Lowmc block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Lowmc block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Lowmc block cipher

    +

    Lowmc block cipher

    class LowMCBlockCipher(block_bit_size=128, key_bit_size=128, number_of_rounds=0, number_of_sboxes=0)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the LowMCBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -241,94 +242,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -367,185 +285,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
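The four criteria above can be read off a single avalanche probability vector. The following self-contained sketch is illustrative only: the helper name and the toy vector are made up here, and the bias-based formalisation of the entropy criterion is an assumption. It mirrors the definitions in the ALGORITHM paragraph rather than the CLAASP implementation:
sage: import numpy as np
sage: def criteria_from_apv(apv, bias=0.05):   # hypothetical helper, not part of CLAASP
....:     p = np.asarray(apv, dtype=float)
....:     dependence = int(np.count_nonzero(p > 0))            # output bits that flip at all
....:     uniform = int(np.count_nonzero((p >= 0.5 - bias) & (p <= 0.5 + bias)))
....:     weight = float(p.sum())                              # expected Hamming weight of the output difference
....:     with np.errstate(divide='ignore', invalid='ignore'):
....:         h = -p * np.log2(p) - (1 - p) * np.log2(1 - p)   # per-bit flip uncertainty
....:     entropy = float(np.nan_to_num(h).sum())
....:     return dependence, uniform, weight, entropy
sage: dep, dep_uniform, weight, entropy = criteria_from_apv([0.0, 0.5, 0.48, 1.0])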
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the -diffusion_factor, or not

    • -
    -

    OUTPUT:

    -
    -
      -
• A python dictionary that contains the test result for each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -588,53 +345,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -679,7 +389,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input, and cipher_input[1] the second. @@ -693,11 +403,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
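A minimal sketch of the byte-array layout described above (one byte per row, one column per sample); the plaintext-then-key input order is an assumption based on the other examples in these docs, and the shape of the returned value is not shown here:
sage: plaintexts = np.random.randint(256, size=(4, 2), dtype=np.uint8)   # two random 32-bit blocks
sage: keys = np.random.randint(256, size=(8, 2), dtype=np.uint8)         # two random 64-bit keys
sage: outputs = speck.evaluate_vectorized([plaintexts, keys])            # not tested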
    @@ -758,28 +471,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -815,35 +506,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -862,50 +524,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1184,43 +802,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1377,24 +958,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1529,38 +1092,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES::
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
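A sketch with explicit (non-default) parameters, purely to show how the inputs documented above fit together; the values are illustrative and deliberately small, not recommendations, and the structure of the return value is not documented here:
sage: cipher = SpeckBlockCipher(number_of_rounds=5)
sage: results = cipher.run_autond_pipeline(difference_positions=[True, False],   # differences in the plaintext only
....:     optimizer_samples=10**3, optimizer_generations=5,
....:     training_samples=10**5, testing_samples=10**4,
....:     number_of_epochs=1, verbose=False)   # long time, not tested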

    -
    -
    sbox_layer(plaintext_id)
    @@ -1654,70 +1185,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES::
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests.
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.
• verbose – boolean (default: False); verbosity

    -

EXAMPLES::
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
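For a custom dataset, the generator only has to respect the contract described above; the existing example passes a two-argument callable with the cipher captured in a closure, and the toy generator below (random data and labels, purely illustrative and not from the library) follows the same form:
sage: import numpy as np
sage: def toy_generator(nr, samples):   # hypothetical stand-in for get_differential_dataset
....:     X = np.random.randint(0, 2, size=(samples, 64), dtype=np.uint8)   # one row per sample
....:     Y = np.random.randint(0, 2, size=samples, dtype=np.uint8)         # one label per sample
....:     return X, Y
sage: cipher.train_neural_distinguisher(toy_generator, starting_round = 5, neural_network = neural_network)   # not tested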

    -
    -
    -
    property type
    +
    + +
    +
    update_key_register(key_id, round)
    @@ -1752,8 +1229,8 @@

    Previous topic

    This Page

    @@ -1771,7 +1248,7 @@

    Quick search

    - +
    @@ -1786,7 +1263,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
diff --git a/docs/build/html/ciphers/block_ciphers/lowmc_generate_matrices.html b/docs/build/html/ciphers/block_ciphers/lowmc_generate_matrices.html
index d1e92414..6314eddd 100644
--- a/docs/build/html/ciphers/block_ciphers/lowmc_generate_matrices.html
+++ b/docs/build/html/ciphers/block_ciphers/lowmc_generate_matrices.html
@@ -1,23 +1,24 @@
-Lowmc generate matrices — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Lowmc generate matrices — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Lowmc generate matrices

    +

    Lowmc generate matrices

    Adapted from the generate_matrices.py file the LowMC repo: https://github.com/LowMC/lowmc/blob/master/generate_matrices.py

    @@ -103,13 +104,13 @@

    Navigation

    This Page

    @@ -127,7 +128,7 @@

    Quick search

    - +
    @@ -142,10 +143,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -153,7 +154,7 @@

    Navigation

diff --git a/docs/build/html/ciphers/block_ciphers/midori_block_cipher.html b/docs/build/html/ciphers/block_ciphers/midori_block_cipher.html
index 5631cc73..f6b3c15a 100644
--- a/docs/build/html/ciphers/block_ciphers/midori_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/midori_block_cipher.html
@@ -1,23 +1,24 @@
-Midori block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Midori block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Midori block cipher

    +

    Midori block cipher

    class MidoriBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the MidoriBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
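In symbols, writing p_i for the estimated probability that output bit i flips under a fixed input bit difference, the criteria described above can be summarised as follows. This formalisation, in particular the entropy expression, is our reading of the prose and is stated only as a clarification, not quoted from the library:
dependence = \#\{\, i : p_i > 0 \,\}
dependence uniform = \#\{\, i : p_i \in \left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right] \,\}
weight = \sum_i p_i
entropy = \sum_i \left(-p_i \log_2 p_i - (1 - p_i)\log_2(1 - p_i)\right)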
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the -diffusion_factor, or not

    • -
    -

    OUTPUT:

    -
    -
      -
• A python dictionary that contains the test result for each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input, and cipher_input[1] the second. @@ -667,11 +377,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
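A one-line sketch of the evaluate_api round trip stated above; the integer inputs are arbitrary and the equality is the documented expectation rather than something verified here:
sage: x = [0x65656877, 0x1918111009080100]
sage: speck.evaluate_vectorized(x, evaluate_api=True) == speck.evaluate(x)   # expected True per the note above; not tested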
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1155,43 +773,6 @@

    Navigation

    mix_column(data)
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1348,24 +929,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1505,38 +1068,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
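    As a complement, a hedged sketch of an explicit single-key invocation on round-reduced Speck; the difference_positions value and the reduced sample counts below are illustrative assumptions, not values from the source:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=5)
    sage: # plaintext position True, key position False: the single-key case described above
    sage: results = cipher.run_autond_pipeline(difference_positions=[True, False],  # doctest: +SKIP
    ....:     optimizer_samples=1000, training_samples=10**5, testing_samples=10**4,
    ....:     number_of_epochs=1, verbose=False)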
    set_file_name(file_name)
    @@ -1623,70 +1154,16 @@

    train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) depth-depth Gohr RESNet ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, which determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766
    train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type
diff --git a/docs/build/html/ciphers/block_ciphers/present_block_cipher.html b/docs/build/html/ciphers/block_ciphers/present_block_cipher.html
index c8e317a8..f97c0e92 100644
--- a/docs/build/html/ciphers/block_ciphers/present_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/present_block_cipher.html
@@ -1,23 +1,24 @@
-    Present block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Present block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

@@ -56,11 +57,11 @@

    Present block cipher

    class PresentBlockCipher(key_bit_size=80, number_of_rounds=None)

-   Bases: claasp.cipher.Cipher
+   Bases: Cipher

    Construct an instance of the PresentBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -229,94 +230,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUT: a dictionary with the following keys:

    • npolynomials – number of polynomials

    • nvariables – number of variables

    • timeout – timeout in seconds

    • pass – whether the algebraic test passed w.r.t. the given timeout

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    sage: d = speck.algebraic_tests(5)  # long time
    sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    ....: {'number_of_variables': [304, 800],
    ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    True
    analyze_cipher(tests_configuration)

    Generate a dictionary with the analysis of the cipher.

    The analysis is related to the following tests:

    • Diffusion Tests

    INPUT:

    • tests_configuration – python dictionary

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    ....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    ....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    ....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    ....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    ....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    sage: analysis = sp.analyze_cipher(tests_configuration)
    sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
    avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

    The inputs considered are plaintext, key, etc.

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: apvs["key"]["round_output"][31][0] # random
    cipher_inverse()
    @@ -355,185 +273,24 @@

    component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher's operations.

    INPUT:

    • None

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: len(result)
    9
    component_from(round_number, index)
    compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    ALGORITHM:

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    Note

    d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    INPUT:

    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

    See also

    avalanche_probability_vectors() for the returned vectors.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
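    To make the dependence uniform criterion above easier to scan, here is a compact restatement in display math; the symbol p_i for the estimated flip probability of output bit i and the name threshold are assumed shorthands, not notation taken from the source:

    \[
      D_{\mathrm{unif}} \;=\; \#\left\{\, i \;:\; p_i \in \left[\tfrac{1}{2} - \mathrm{bias},\; \tfrac{1}{2} + \mathrm{bias}\right] \right\}
    \]
    \[
      \text{criterion satisfied at a round} \iff \Big|\, \min_{\text{input differences}} D_{\mathrm{unif}} \;-\; \text{output\_bit\_size} \,\Big| \;\le\; \text{threshold}
    \]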
    continuous_avalanche_factor(lambda_value, number_of_samples)

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    INPUT:

    • lambda_value – float; threshold value used to express the input difference

    • number_of_samples – integer; number of samples used to compute the continuous avalanche factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2)
    sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    0.0
    continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    INPUT:

    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2) # long time
    sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    True
    continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    INPUT:

    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether the continuous_avalanche_factor is computed

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether the continuous_neutrality_measure is computed

    • is_diffusion_factor – boolean (default: True); flag indicating whether the diffusion_factor is computed

    OUTPUT:

    • A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=1) # long time
    sage: output = speck_cipher.continuous_diffusion_tests() # long time
    sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    True
    continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    INPUT:

    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    • input_bit – integer (default: None); input bit position to be analyzed

    • output_bits – list (default: None); output bit positions to be analyzed

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    True
+convert_to_compound_xor_cipher()

-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
+create_networx_graph_from_input_ids()

-convert_to_compound_xor_cipher()
+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -566,53 +323,6 @@

    diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    INPUT:

    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

    diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: d = speck.diffusion_tests(number_of_samples=100)
    sage: d["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -657,7 +367,7 @@

    Navigation

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

@@ -671,11 +381,14 @@

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-   • intermediate_outputs – boolean (default: False)
+   • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+   • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
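    The evaluate_api contract stated above can be checked directly. The following is a minimal sketch, reusing the speck instance from the example; the plaintext and key values are illustrative assumptions, not outputs from the source:

    sage: pt, key = 0x6574694c, 0x1918111009080100                     # illustrative Speck32/64-style values
    sage: c1 = speck.evaluate([pt, key])                               # integer-based evaluation
    sage: c2 = speck.evaluate_vectorized([pt, key], evaluate_api=True) # doctest: +SKIP
    sage: c1 == c2   # expected to hold per the contract above         # doctest: +SKIP
    True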
    @@ -736,28 +449,6 @@ 

    Navigation

    property file_name
    find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions – table of booleans, one for each input to the cipher; True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
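    The example above invokes the routine in free-function form without an import. As a hedged alternative, here is a sketch of the equivalent call through the cipher object, assuming the method is exposed on the cipher class exactly as the signature of this entry suggests:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher(  # doctest: +SKIP
    ....:     [True, False], number_of_generations=5, verbose=False)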
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -793,35 +484,6 @@

    Navigation

    generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -840,50 +502,6 @@

    Navigation

    generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or on a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1149,43 +767,6 @@

    Navigation

    make_file_name()
    neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    property number_of_rounds
    @@ -1347,24 +928,6 @@

    print_component_analysis_as_radar_charts(component_analysis_results)
    print_evaluation_python_code(verbosity=False)
    @@ -1499,38 +1062,6 @@

    Navigation

    property rounds_as_list
    run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    sbox_layer(data)
    @@ -1612,70 +1143,16 @@

    train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    property type
    update_key_register(key, r)
    @@ -1705,13 +1182,13 @@

diff --git a/docs/build/html/ciphers/block_ciphers/qarmav2_block_cipher.html b/docs/build/html/ciphers/block_ciphers/qarmav2_block_cipher.html
index 47739311..9e43af0c 100644
--- a/docs/build/html/ciphers/block_ciphers/qarmav2_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/qarmav2_block_cipher.html
@@ -1,23 +1,24 @@
-    Qarmav2 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Qarmav2 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

@@ -56,11 +57,11 @@

    Qarmav2 block cipher

    class QARMAv2BlockCipher(number_of_rounds=10, number_of_layers=1, key_bit_size=128, tweak_bit_size=128)

-   Bases: claasp.cipher.Cipher
+   Bases: Cipher

    Return a cipher object of Qarma v2 Block Cipher.

    INPUT:

      @@ -230,94 +231,11 @@

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)

    algebraic_tests(timeout)

    analyze_cipher(tests_configuration)

    as_python_dictionary()

    avalanche_probability_vectors(nb_samples)
    cipher_inverse()
    @@ -356,185 +274,34 @@

    component_analysis_tests()

    component_from(round_number, index)

    compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    continuous_avalanche_factor(lambda_value, number_of_samples)

    continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

+constants_initialization()

    continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

+constants_update()

    continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

+convert_to_compound_xor_cipher()

-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
+create_networx_graph_from_input_ids()

-convert_to_compound_xor_cipher()
+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -568,51 +335,9 @@

    diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

+direct_round(state, key_state, tweak_state, tweak_permutation, constants_states, round_number)
    @@ -658,7 +383,7 @@

    Navigation

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

@@ -672,11 +397,14 @@

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-   • intermediate_outputs – boolean (default: False)
+   • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+   • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -737,28 +465,6 @@ 

    Navigation

    property file_name
    find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -774,6 +480,11 @@

    Navigation

+first_round_start(key_state)
    generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    @@ -794,35 +505,6 @@

    Navigation

    generate_csv_report(nb_samples, output_absolute_path)
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -841,50 +523,6 @@


    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1043,6 +681,11 @@


    inputs_size_to_dict()
    +
    +
    +inverse_round(state, key_state, tweak_state, tweak_permutation, constants_states, round_number)
    +
    +
    is_algebraically_secure(timeout)
    @@ -1140,6 +783,21 @@


    +
    +
    +key_initialization(key_bit_size)
    +
    + +
    +
    +key_update(key_state)
    +
    + +
    +
    +last_round_end(state, key_state, tweak_state, constants_states)
    +
    +
    majority_function(key)
    @@ -1155,43 +813,6 @@


    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1353,24 +974,6 @@


    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1480,6 +1083,11 @@


    property reference_code
    +
    +
    +reflector(state, key_state, tweak_state, constants_states)
    +
    +
    remove_key_schedule()
    @@ -1505,38 +1113,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1557,6 +1133,21 @@


    sort_cipher()
    +
    +
    +state_masking(id_links, bit_positions)
    +
    + +
    +
    +state_rotation(id_links)
    +
    + +
    +
    +state_sboxing(id_links, bit_positions, sbox)
    +
    +
    test_against_reference_code(number_of_tests=5)
    @@ -1614,63 +1205,14 @@


    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    +
    +tweak_initialization(tweak_permutation, tweak_bit_size)
    +
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    +
    +tweak_update(bit_positions, tweak_shuffle)
    +
    @@ -1678,8 +1220,13 @@


    -
    -update_constants(constant)
    + +
    + +
    +
    +update_single_constant(constant)
    @@ -1706,13 +1253,13 @@

diff --git a/docs/build/html/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.html b/docs/build/html/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.html
new file mode 100644
index 00000000..4abb0534
--- /dev/null
+++ b/docs/build/html/ciphers/block_ciphers/qarmav2_with_mixcolumn_block_cipher.html
@@ -0,0 +1,1381 @@
+Qarmav2 with mixcolumn block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Qarmav2 with mixcolumn block cipher

    +
    +
    +class QARMAv2MixColumnBlockCipher(number_of_rounds=10, number_of_layers=1, key_bit_size=128, tweak_bit_size=128)
    +

    Bases: Cipher

    +

Return a cipher object of Qarma v2 Block Cipher. This version uses the MixColumn component to model the diffusion layer, resulting in an invertible cipher object.
+However, it may be less efficient than the QARMAv2BlockCipher cipher object for vectorized evaluation.

    +

    INPUT:

    +
      +
• number_of_rounds – integer (default: 10); number of rounds of the cipher. Must be greater than or equal to 1.

• +
• number_of_layers – integer (default: 1); number of layers of the state represented as matrices. Must be equal to 1 or 2.

• +
• key_bit_size – integer (default: 128); length of the key in bits. If number_of_layers is equal to 1 it must be equal to 128, otherwise it must be equal to 128, 192 or 256.

• +
• tweak_bit_size – integer (default: 128); length of the tweak in bits. Must be equal to either 64*number_of_layers or 128*number_of_layers.

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.qarmav2_with_mixcolumn_block_cipher import QARMAv2MixColumnBlockCipher
    +sage: qarmav2 = QARMAv2MixColumnBlockCipher(number_of_rounds = 4)
    +sage: key = 0x0123456789abcdeffedcba9876543210
    +sage: tweak = 0x7e5c3a18f6d4b2901eb852fc9630da74
    +sage: plaintext = 0x0000000000000000
    +sage: ciphertext = 0x2cc660354929f2ca
    +sage: qarmav2.evaluate([key, plaintext, tweak]) == ciphertext
    +True
    +
    +
    +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +constants_initialization()
    +
    + +
    +
    +constants_update()
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +direct_round(round_output, key_state, tweak_state, tweak_permutation, round_constant, round_number)
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, +and cipher_inputs[1] the second. +Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size +(in bytes) of the input, and m is the number of samples.

    +

    The return is a list of m*n ndarrays (format transposed compared to the input format), +where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

    +

    This function determines automatically if a bit-based evaluation is required, +and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, +with m the number of inputs to evaluate)

    • +
• intermediate_output – boolean (default: False)

    • +
• verbosity – boolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    • +
    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
+is True.
+EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
    +
    +
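+
+As an illustrative sketch of the evaluate_api flag described above (reusing X0Lib, K0Lib and C0Lib from the previous example; this snippet is not part of the generated reference):
+
+sage: speck.evaluate_vectorized([X0Lib, K0Lib], evaluate_api=True) == C0Lib  # doctest: +SKIP
+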
    + +
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

    From [SGLYTQH2017] : Finds impossible differentials or zero-correlation linear approximations (based on type) +by fixing the input and output iteratively to all possible Hamming weight 1 value, and asking the solver +to find a solution; if none is found, then the propagation is impossible. +Return a list of impossible differentials or zero_correlation linear approximations if there are any; otherwise return an empty list +INPUT:

    +
      +
• type – string; {“differential”, “linear”}: the type of property to search for

• +
• technique – string; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

• +
• solver – string; the name of the solver to use for the search

    • +
    +
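+
+A minimal usage sketch (not taken from the generated page; variable names are arbitrary, and the search can be slow, since every Hamming-weight-1 input/output pair is tested with the chosen solver):
+
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher  # doctest: +SKIP
+sage: cipher = SpeckBlockCipher(number_of_rounds=5)  # doctest: +SKIP
+sage: impossible = cipher.find_impossible_property("differential", technique="sat", solver="kissat")  # doctest: +SKIP
+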
    + +
    +
    +first_round_start(key_state)
    +
    + +
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to make the C code +print a dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the C code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • word_sizeinteger; the size of the word

    • +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

    Returns a model for a given technique and problem.

    +

    INPUT:

    +
    +
      +
• technique – string; sat, smt, milp or cp

• +
• problem – string; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)

    • +
    +
    +
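+
+Illustrative sketch of a call (the argument values follow the parameter description above; variable names are arbitrary and the snippet is not part of the generated reference):
+
+sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher  # doctest: +SKIP
+sage: cipher = SpeckBlockCipher(number_of_rounds=5)  # doctest: +SKIP
+sage: sat_model = cipher.get_model('sat', 'xor_differential')  # doctest: +SKIP
+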
    + +
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
    + +
    +
    +get_round_from_component_id(component_id)
    +

    Return the round according to the round of the component id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

    Return a list of impossible differentials if there are any; otherwise return an empty list +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    • scenariostring; the type of impossible differentials to search, single-key or related-key

    • +
    +
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +inverse_round(round_output, key_state, tweak_state, tweak_permutation, round_constant, round_number)
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

    Return True if the cipher is resistant against algebraic attack.

    +

    INPUT:

    +
      +
• timeout – integer; the timeout for the Grobner basis computation in seconds

    • +
    +
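+
+Hedged example (not present in the generated page; the boolean result depends on the timeout chosen):
+
+sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher  # doctest: +SKIP
+sage: IdentityBlockCipher().is_algebraically_secure(30)  # doctest: +SKIP
+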
    + +
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

    Return True if the cipher is SPN.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +key_initialization(key_bit_size)
    +
    + +
    +
    +key_update(key_state)
    +
    + +
    +
    +last_round_end(round_output, key_state, round_key_shuffle, round_constant)
    +
    + +
    +
    +majority_function(key)
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +o_function(key)
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    +
      +
    • rinteger; round index

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    +
      +
    • file_namestring; a python string representing a valid file name

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

    Print the python code that implement the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    +
      +
    • file_namestring; name of the output file

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    +
    +
    Possible cipher inputs are:
      +
    • plaintext

    • +
    • key

    • +
    • tweak

    • +
    • initialization vector

    • +
    • nonce

    • +
    • constant

    • +
    • etc.

    • +
    +
    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +reflector(round_output, key_state, round_key_shuffle, round_constant)
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    +
      +
    • number_of_testsinteger (default: 5); number of tests to execute

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

    Testing the cipher with list of test vectors input and list of test vectors output.

    +

    INPUT:

    +
      +
    • list_of_test_vectors_inputlist; list of input testing vectors

    • +
    • list_of_test_vectors_outputlist; list of the expected output of the corresponding input testing +vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    • +
    +

    OUTPUT:

    +
      +
    • test_result – output of the testing. True if all the cipher.evaluate(input)=output for every input

    • +
    +

    test vectors, and False, otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +tweak_initialization(tweak_permutation, tweak_bit_size)
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    +
    +update_single_constant(constant)
    +
    + +
    + +

    Return a list of zero_correlation linear approximations if there are any; otherwise return an empty list +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/ciphers/block_ciphers/raiden_block_cipher.html b/docs/build/html/ciphers/block_ciphers/raiden_block_cipher.html
index b2f7dcfb..66c8f300 100644
--- a/docs/build/html/ciphers/block_ciphers/raiden_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/raiden_block_cipher.html
@@ -1,23 +1,24 @@
-Raiden block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Raiden block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    -

    Raiden block cipher

    +

    Raiden block cipher

    class RaidenBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0, right_shift_amount=14, left_shift_amount=9)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the RaidenBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -226,94 +227,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -352,185 +270,24 @@


    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    • -
    -

    OUTPUT:

    -
    -
      -
    • A Python dictionary that contains the test results for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    • input_bit – integer (default: None); input bit position to be analyzed

    • output_bits – list (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
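
    A hedged usage sketch (not from the library's doctests): the optional input_bit and output_bits parameters documented above can be used to restrict the analysis; the parameter values below are purely illustrative.

    sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(10, 20, input_bit=0, output_bits=[0, 1, 2, 3])  # long time, illustrative parameters
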
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -563,53 +320,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range in which the probability of flipping should lie

    • avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
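
    As a hedged illustration of the acceptance interval behind the *_criterion_threshold parameters above (not library code), the avalanche-weight check for a 16-bit block can be written out explicitly:

    sage: block_bit_size, bias = 16, 0.01
    sage: w = 8.0  # an illustrative avalanche weight value
    sage: block_bit_size/2 - bias <= w <= block_bit_size/2 + bias
    True
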
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -654,7 +364,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second.
    @@ -668,11 +378,14 @@

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • intermediate_outputs – boolean (default: False)

    • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
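    sage: # Hedged sketch (not part of the original example): checking the consistency property
    sage: # stated above for evaluate_api=True; the plaintext and key values are illustrative.
    sage: plaintext, key = 0x01234567, 0xabcdef0123456789
    sage: speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)  # expected to be True, per the note above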
    @@ -733,28 +446,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional: initial table of differences to try

    • verbose – boolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -790,35 +481,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -837,50 +499,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
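
    Since the method returns a LaTeX string, a natural follow-up is to write it to a .tex file to be compiled; a minimal hedged sketch (the file name is illustrative):

    sage: with open("avalanche_heatmaps.tex", "w") as f:
    ....:     _ = f.write(h)
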
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1146,43 +764,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1339,24 +920,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_results – list; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
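
    A hedged follow-up (not from the library's doctests): assuming the returned object supports matplotlib's standard saving API, the charts can be written to disk instead of shown interactively; the file name is illustrative.

    sage: fig.savefig("aes_component_analysis_radar.png")  # doctest: +SKIP
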
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1491,38 +1054,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: `True in the plaintext position, False in the other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
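
    A hedged usage sketch (not from the library's doctests): an explicit call that restricts the search to plaintext differences only and uses deliberately small, illustrative sample and epoch counts to keep the run short; these values are not recommendations.

    sage: cipher.run_autond_pipeline(difference_positions=[True, False], optimizer_samples=1000,
    ....:                            optimizer_generations=5, training_samples=10**5,
    ....:                            testing_samples=10**4, number_of_epochs=1, verbose=False)  # doctest: +SKIP
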
    -
    set_file_name(file_name)
    @@ -1599,70 +1130,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1687,13 +1164,13 @@

    Navigation

    This Page

    @@ -1711,7 +1188,7 @@

    Quick search

    - +
    @@ -1726,10 +1203,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1737,7 +1214,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/rc5_block_cipher.html b/docs/build/html/ciphers/block_ciphers/rc5_block_cipher.html index deafaaf6..9d3242bd 100644 --- a/docs/build/html/ciphers/block_ciphers/rc5_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/rc5_block_cipher.html @@ -1,23 +1,24 @@ - + - Rc5 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Rc5 block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Rc5 block cipher

    +

    Rc5 block cipher

    class RC5BlockCipher(number_of_rounds=16, word_size=16, key_size=64)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of RC5 Block Cipher.

    INPUT:

      @@ -223,94 +224,11 @@

      Navigation

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeout – integer; the timeout for the Gröbner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
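
    A hedged follow-up sketch (not from the library's doctests), reading the 'test_results' layout shown in the example above to check whether the test passed for any round:

    sage: any(d['test_results']['test_passed'])  # long time
    False
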
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configuration – python dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samples – integer; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
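
    A hedged post-processing sketch (not library code), reusing apvs from the example and the indexing documented in the Note above to see which output bit is most or least likely to flip for this injected difference:

    sage: v = apvs["key"]["round_output"][31][0]
    sage: max(v), min(v)  # random
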
    -
    cipher_inverse()
    @@ -349,190 +267,29 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    -

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • avalanche_dependence_uniform_bias – float; defines the range in which the probability of flipping should lie

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    -
    compute_magic_constants(word_size)
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    • -
    -

    OUTPUT:

    -
    -
      -
    • A Python dictionary that contains the test results for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    • input_bit – integer (default: None); input bit position to be analyzed

    • output_bits – list (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -565,53 +322,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range in which the probability of flipping should lie

    • avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -656,7 +366,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second.
    @@ -670,11 +380,14 @@

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • intermediate_outputs – boolean (default: False)

    • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -735,28 +448,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -797,35 +488,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -844,50 +506,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1158,43 +776,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1351,24 +932,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_results – list; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1508,38 +1071,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: `True in the plaintext position, False in the other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1616,70 +1147,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1704,13 +1181,13 @@

    Navigation

    This Page

    @@ -1728,7 +1205,7 @@

    Quick search

    - +
    @@ -1743,10 +1220,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1754,7 +1231,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/scarf_block_cipher.html b/docs/build/html/ciphers/block_ciphers/scarf_block_cipher.html new file mode 100644 index 00000000..8ecdbfc6 --- /dev/null +++ b/docs/build/html/ciphers/block_ciphers/scarf_block_cipher.html @@ -0,0 +1,1349 @@ + + + + + + + + + Scarf block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    + +
    +

    Scarf block cipher

    +
    +
    +class SCARFBlockCipher(number_of_rounds=8)
    +

    Bases: Cipher

    +

    Construct an instance of the SCARFBlockCipher class.

    +

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    +

    INPUT:

    +
      +
    • block_bit_size – integer (default: 10); cipher input and output block bit size of the cipher

    • key_bit_size – integer (default: 240); cipher key bit size of the cipher

    • tweak_bit_size – integer (default: 48); cipher tweak bit size of the cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.scarf_block_cipher import SCARFBlockCipher
    +sage: scarf = SCARFBlockCipher()
    +sage: scarf.number_of_rounds
    +8
    +
    +sage: scarf.component_from(0, 0).id
    +'constant_0_0'
    +
    +
    +
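    +
    +A hedged follow-up (not from the library's doctests): the attribute below is the one referenced elsewhere in this documentation (e.g. in run_autond_pipeline) and is assumed to list the bit sizes of the declared inputs; the output is indicative only.
    +
    +sage: scarf.inputs_bit_size  # random
    +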
    +
    +F_function(data, Ti, current_round)
    +
    + +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_subkey(data, Ti, current_round)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_and_components(rot_components, and_components, Ti, current_round)
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_rot_components(data, rot_components)
    +
    + +
    +
    +create_sbox_components(Ti, sboxes_components)
    +
    + +
    +
    +create_sigma_components(sboxes_components)
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second. Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size (in bytes) of the input and m is the number of samples.

    +

    The return is a list of m*n ndarrays (format transposed compared to the input format), +where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

    +

    This function determines automatically if a bit-based evaluation is required, +and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, +with m the number of inputs to evaluate)

    • +
    • intermediate_outputboolean (default: False)

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    • +
    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True (a short sketch of this follows the example below). +EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
    +
    +
    + +
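A minimal sketch of the evaluate_api behavior described above, assuming only the documented equality between evaluate and evaluate_vectorized(..., evaluate_api=True); the input values are taken from the test_vector_check example on this page:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
sage: plaintext, key = 0x6574694c, 0x1918111009080100
sage: # documented expectation: the integer API agrees with evaluate
sage: speck.evaluate_vectorized([plaintext, key], evaluate_api=True) == speck.evaluate([plaintext, key])  # doctest: +SKIP
True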
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

From [SGLYTQH2017]: finds impossible differentials or zero-correlation linear approximations (based on type) by iteratively fixing the input and output to all possible Hamming weight 1 values and asking the solver to find a solution; if none is found, the propagation is impossible. Return a list of impossible differentials or zero-correlation linear approximations if there are any; otherwise return an empty list. A usage sketch follows the input list below.
INPUT:

    +
      +
    • typestring; {“differential”, “linear”}: the type of property to search for

    • +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
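A minimal usage sketch, assuming only the signature and argument values documented above (the search can be long, and the result depends on the cipher and the number of rounds):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
sage: # returns a (possibly empty) list of impossible differentials in the single-key scenario
sage: speck.find_impossible_property(type="differential", technique="sat", solver="kissat")  # doctest: +SKIP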
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to make the C code +print a dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the C code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • word_sizeinteger; the size of the word

    • +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

Returns a model for a given technique and problem. A usage sketch follows the input list below.

    +

    INPUT:

    +
    +
      +
    • techniquestring ; sat, smt, milp or cp

    • +
    • problemstring ; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)

    • +
    +
    +
    + +
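As a hedged illustration, using only the technique and problem values listed above (the concrete object returned depends on the library's model classes):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
sage: # build a SAT model for the XOR differential problem of this cipher
sage: model = speck.get_model("sat", "xor_differential")  # doctest: +SKIP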
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
    + +
    +
    +get_round_from_component_id(component_id)
    +

Return the round number of the component whose id is given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

    Return a list of impossible differentials if there are any; otherwise return an empty list +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    • scenariostring; the type of impossible differentials to search, single-key or related-key

    • +
    +
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

Return True if the cipher is resistant against algebraic attacks with respect to the given timeout. A usage sketch follows the input list below.

    +

    INPUT:

    +
      +
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • +
    +
    + +
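A minimal sketch; the timeout value is illustrative and the underlying Groebner basis computation may be expensive:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=1)
sage: # True if the cipher withstands the algebraic test within the given timeout (seconds)
sage: speck.is_algebraically_secure(30)  # doctest: +SKIP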
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

    Return True if the cipher is SPN.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    +
      +
    • rinteger; round index

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    +
      +
    • file_namestring; a python string representing a valid file name

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

Print the python code that implements the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    +
      +
    • file_namestring; name of the output file

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    +
    +
    Possible cipher inputs are:
      +
    • plaintext

    • +
    • key

    • +
    • tweak

    • +
    • initialization vector

    • +
    • nonce

    • +
    • constant

    • +
    • etc.

    • +
    +
    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    +
      +
    • number_of_testsinteger (default: 5); number of tests to execute

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

Test the cipher with a list of test vector inputs and a list of expected test vector outputs.

    +

    INPUT:

    +
      +
    • list_of_test_vectors_inputlist; list of input testing vectors

    • +
    • list_of_test_vectors_outputlist; list of the expected output of the corresponding input testing +vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    • +
    +

    OUTPUT:

    +
      +
• test_result – output of the testing: True if cipher.evaluate(input) == output for every input/output pair among the given

    • +
    +

test vectors, and False otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +tweakey_schedule(tweak, key, constant)
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    + +

    Return a list of zero_correlation linear approximations if there are any; otherwise return an empty list +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/build/html/ciphers/block_ciphers/simon_block_cipher.html b/docs/build/html/ciphers/block_ciphers/simon_block_cipher.html
index a6bff56f..af108c18 100644
--- a/docs/build/html/ciphers/block_ciphers/simon_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/simon_block_cipher.html
@@ -1,23 +1,24 @@
-Simon block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Simon block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@


    Simon block cipher

    +

    Simon block cipher

class SimonBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=None, rotation_amounts=[-1, -8, -2])
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the SimonBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -227,94 +228,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -353,185 +271,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability \(\in \left[\frac{1}{2} - \text{bias},\ \frac{1}{2} + \text{bias}\right]\), with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -564,53 +321,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -655,7 +365,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -669,11 +379,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -744,28 +457,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -801,35 +492,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -848,50 +510,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors".

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_round_key(round_keys, round_number)
    @@ -1162,43 +780,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1355,24 +936,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1507,38 +1070,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
Runs the AutoND pipeline ([BGHR2023]):
• Find an input difference for the inputs set to True in difference_positions using an optimizer
• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
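    To make the role of difference_positions concrete, a hedged sketch follows; it assumes the cipher's inputs are ordered (plaintext, key), so [True, False] restricts the optimizer to plaintext differences (the single-key case), and the reduced parameters are illustrative only:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: # look for input differences in the plaintext only, with a shortened optimizer run
    sage: cipher.run_autond_pipeline(difference_positions=[True, False], optimizer_generations=5, number_of_epochs=1)  # doctest: +SKIP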
    set_file_name(file_name)
    @@ -1615,70 +1146,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's RESNet of depth depth ([Go2019]).

    -

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, which determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)
    -
    -
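    The data_generator contract (rounds and samples in; X, Y out) can equivalently be met with a named function; this hedged sketch assumes the same imports and cipher as in the example above and is not a verified doctest:

    sage: # must return X (numpy matrix, one row per sample) and Y (label vector)
    sage: def my_data_generator(nr, samples):  # doctest: +SKIP
    ....:     return get_differential_dataset(cipher, [0x400000, 0], number_of_rounds=nr, samples=samples)
    sage: cipher.train_neural_distinguisher(my_data_generator, starting_round=5, neural_network=neural_network)  # doctest: +SKIP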
    -
    property type
    @@ -1703,13 +1180,13 @@ This Page
    @@ -1727,7 +1204,7 @@ Quick search
    @@ -1742,10 +1219,10 @@ Navigation
    @@ -1753,7 +1230,7 @@ Navigation
diff --git a/docs/build/html/ciphers/block_ciphers/skinny_block_cipher.html b/docs/build/html/ciphers/block_ciphers/skinny_block_cipher.html
index 5f7e0291..e3e95a35 100644
--- a/docs/build/html/ciphers/block_ciphers/skinny_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/skinny_block_cipher.html
    @@ -1,23 +1,24 @@
    -Skinny block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +Skinny block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    @@ -33,10 +34,10 @@ Navigation
    @@ -44,7 +45,7 @@ Navigation
    @@ -56,11 +57,11 @@ Navigation

    Skinny block cipher

    class SkinnyBlockCipher(block_bit_size=128, key_bit_size=384, number_of_rounds=40)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the SkinnyBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -239,94 +240,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUT: a dictionary with the following keys:

    • npolynomials – number of polynomials

    • nvariables – number of variables

    • timeout – timeout in seconds

    • pass – whether the algebraic test passes w.r.t. the given timeout
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
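    Following the indexing convention in the Note above, a hedged sketch of reading the flip probabilities for a single key-bit difference (the values are random, so the run is skipped in doctests):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: cipher = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = cipher.avalanche_probability_vectors(100)  # doctest: +SKIP
    sage: # flip probability of each round-output bit when key bit 31 is flipped (occurrence 0)
    sage: vector = apvs["key"]["round_output"][31][0]  # doctest: +SKIP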
    cipher_inverse()
    @@ -365,185 +283,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    Note

    d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    INPUT:

    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
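    As a purely illustrative aside to the avalanche weight definition above (not library code), the avalanche weight of one input difference is simply the sum of the per-bit flip probabilities in its avalanche probability vector, e.g. for a hypothetical 4-bit output:

    sage: apv = [0.5, 0.4, 0.6, 0.5]   # hypothetical avalanche probability vector
    sage: sum(apv)                     # expected Hamming weight of the output difference
    2.0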
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
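    A hedged sketch of using the is_* flags documented above to compute only one of the three metrics; the run is skipped in doctests:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=1)
    sage: # keep only the continuous avalanche factor
    sage: output = speck_cipher.continuous_diffusion_tests(is_continuous_neutrality_measure=False, is_diffusion_factor=False)  # doctest: +SKIP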
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -576,53 +333,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

    The difference is inserted in:
    d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
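    As a purely illustrative aside, the threshold parameters above act as two-sided biases; for instance, for a 16-bit block, a measured avalanche weight d would satisfy the avalanche weight criterion with the default bias 0.01 when:

    sage: block_bit_size, bias, d = 16, 0.01, 8.003   # d is a hypothetical measured avalanche weight
    sage: block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
    True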
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -667,7 +377,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.
    @@ -681,11 +391,14 @@

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    -• intermediate_outputs – boolean (default: False)
    +• intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component
    +• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
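    Continuing the example above, a hedged sketch of the new evaluate_api round-trip: the plaintext/key values are illustrative, and the equality is the documented expectation rather than a verified doctest:

    sage: inputs = [0x12345678, 0x1918111009080100]   # illustrative plaintext and key
    sage: speck.evaluate_vectorized(inputs, evaluate_api=True) == speck.evaluate(inputs)  # doctest: +SKIP
    True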
    @@ -746,28 +459,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
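    For clarity, a hedged sketch of the call in the example above, spelling out what the difference_positions argument encodes (assuming the cipher's inputs are ordered plaintext, key); the run is skipped in doctests:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: # True/False per cipher input: allow differences in the plaintext, none in the key
    sage: difference_positions = [True, False]
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, difference_positions, verbose=False, number_of_generations=5)  # doctest: +SKIP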
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -803,35 +494,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -850,50 +512,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

    -

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
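    Since the returned string contains LaTeX instructions, it can be written to a .tex file for later compilation; a hedged sketch follows (the file name is illustrative):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)  # doctest: +SKIP
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)  # doctest: +SKIP
    sage: with open("avalanche_heatmaps.tex", "w") as f:  # doctest: +SKIP
    ....:     _ = f.write(h)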
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1169,43 +787,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1362,24 +943,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1519,38 +1082,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1627,70 +1158,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    @@ -1730,13 +1207,13 @@ This Page
    @@ -1754,7 +1231,7 @@ Quick search
    @@ -1769,10 +1246,10 @@ Navigation
    @@ -1780,7 +1257,7 @@ Navigation
diff --git a/docs/build/html/ciphers/block_ciphers/sparx_block_cipher.html b/docs/build/html/ciphers/block_ciphers/sparx_block_cipher.html
index 0be8a64a..681b440c 100644
--- a/docs/build/html/ciphers/block_ciphers/sparx_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/sparx_block_cipher.html
    @@ -1,23 +1,24 @@
    -Sparx block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +Sparx block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    @@ -33,10 +34,10 @@ Navigation
    @@ -44,7 +45,7 @@ Navigation
    @@ -56,11 +57,11 @@ Navigation

    Sparx block cipher

    class SparxBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0, steps=0)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the SparxBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -245,64 +246,6 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    arx_box(input, i)
    @@ -318,31 +261,6 @@

    Navigation

    assign_functions_based_on(block_bit_size, key_bit_size)
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -381,185 +299,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -592,53 +349,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -683,7 +393,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.
    @@ -697,11 +407,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    +• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -762,28 +475,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -819,35 +510,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -866,50 +528,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1185,43 +803,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1378,24 +959,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1530,38 +1093,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1638,70 +1169,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: number_of_rounds = 5
-sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
-2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
-Validation accuracy at 5 rounds :0.9101160168647766
-0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

• verbose – boolean (default: False); verbosity

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
-sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
-sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/block_ciphers/speck_block_cipher.html b/docs/build/html/ciphers/block_ciphers/speck_block_cipher.html
index 8e9f9f2c..84249f7c 100644
--- a/docs/build/html/ciphers/block_ciphers/speck_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/speck_block_cipher.html
@@ -1,23 +1,24 @@
-    Speck block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Speck block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    -

    Speck block cipher

    +

    Speck block cipher

    class SpeckBlockCipher(block_bit_size=32, key_bit_size=64, rotation_alpha=None, rotation_beta=None, number_of_rounds=0)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the SpeckBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -232,94 +233,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -358,185 +276,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -569,53 +326,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -660,7 +370,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second.
@@ -674,11 +384,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -739,28 +452,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
• nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -796,35 +487,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -843,50 +505,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1157,43 +775,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1350,24 +931,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1512,38 +1075,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1620,70 +1151,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: number_of_rounds = 5
-sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
-2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
-Validation accuracy at 5 rounds :0.9101160168647766
-0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

• verbose – boolean (default: False); verbosity

    -

EXAMPLES:

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
-sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
-sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/block_ciphers/speedy_block_cipher.html b/docs/build/html/ciphers/block_ciphers/speedy_block_cipher.html
new file mode 100644
index 00000000..7e23c7ef
--- /dev/null
+++ b/docs/build/html/ciphers/block_ciphers/speedy_block_cipher.html
@@ -0,0 +1,1317 @@
+    Speedy block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Speedy block cipher

    +
    +
    +class SpeedyBlockCipher(block_bit_size=192, key_bit_size=192, number_of_rounds=1, alpha=(0, 1, 5, 9, 15, 21, 26), beta=7, gamma=1)
    +

    Bases: Cipher

    +

    Construct an instance of the SpeedyBlockCipher class.

    +

    The implementation follows the specifics in [LMM+2021].

    +

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    +

Note that the l parameter of the cipher is automatically determined by block_bit_size and key_bit_size. Please use the same value, a multiple of 12, for both variables.

    +

    INPUT:

    +
      +
    • block_bit_sizeinteger (default: 192); cipher input and output block bit size of the cipher

    • +
    • key_bit_sizeinteger (default: 192); cipher key bit size of the cipher

    • +
    • number_of_roundsinteger (default: 1); number of rounds of the cipher.

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speedy_block_cipher import SpeedyBlockCipher
    +sage: speedy = SpeedyBlockCipher(number_of_rounds=5)
    +sage: plaintext = 0xa13a632451070e4382a27f26a40682f3fe9ff68028d24fdb
    +sage: key = 0x764c4f6254e1bff208e95862428faed01584f4207a7e8477
    +sage: ciphertext = 0x01da25a93d1cfc5e4c0b74f677eb746c281a260193b7755a
    +sage: speedy.evaluate([plaintext, key]) == ciphertext
    +True
    +
    +
    +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
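A small illustrative sketch (not part of the generated documentation) of the intermediate_output=True path described above; it reuses the tuple layout visible in the cipher_partial_inverse example earlier on this page, where element 0 is the cipher output and element 2 is the dictionary of intermediate outputs, and the plaintext and key values are arbitrary:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(number_of_rounds=2)
sage: result = speck.evaluate([0x01234567, 0xabcdef01abcdef01], intermediate_output=True)
sage: sorted(result[2].keys())  # one entry per intermediate output tag (tag names depend on the cipher)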
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size (in bytes) of the input, and m is the number of samples.

The return is a list of m*n ndarrays (format transposed compared to the input format), where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

This function determines automatically if a bit-based evaluation is required, and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, +with m the number of inputs to evaluate)

    • +
    • intermediate_outputboolean (default: False)

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    • +
    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
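A minimal sketch of the evaluate_api path described above (not from the original docstring; the specific plaintext and key values are arbitrary, and the expected output follows from the equivalence stated in the description):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
sage: x = [0x01234567, 0x89ABCDEF01234567]
sage: cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True)
True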
    +
    +
    + +
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

From [SGLYTQH2017]: Finds impossible differentials or zero-correlation linear approximations (based on type) by fixing the input and output iteratively to all possible Hamming weight 1 values, and asking the solver to find a solution; if none is found, then the propagation is impossible. Returns a list of impossible differentials or zero-correlation linear approximations if there are any; otherwise returns an empty list (see the usage sketch after the input list below).

INPUT:

    +
      +
    • typestring; {“differential”, “linear”}: the type of property to search for

    • +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
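    A possible usage sketch, assuming a small Speck instance as in the other examples on this page; the result depends on the cipher, the number of rounds and the chosen solver, so no output is asserted here:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
    sage: impossible = speck.find_impossible_property("differential", technique='sat', solver='kissat')  # doctest: +SKIP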
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    • intermediate_output – boolean (default: False); set this flag to True in order to return a dictionary with each intermediate output

    • verbosity – boolean (default: False); set this flag to True in order to make the code print the input/output of each component

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    • intermediate_output – boolean (default: False); set this flag to True in order to make the C code print a dictionary with each intermediate output

    • verbosity – boolean (default: False); set this flag to True in order to make the C code print the input/output of each component

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    • word_size – integer; the size of the word

    • intermediate_output – boolean (default: False); set this flag to True in order to return a dictionary with each intermediate output

    • verbosity – boolean (default: False); set this flag to True in order to make the code print the input/output of each component

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    • component_id – string; id of a component

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

    Return a model for a given technique and problem.

    INPUT:

    • technique – string; sat, smt, milp or cp

    • problem – string; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)
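    A possible usage sketch, assuming a small Speck instance; the concrete model object returned depends on the requested technique, so it is not shown here:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
    sage: model = speck.get_model('sat', 'xor_differential')  # doctest: +SKIP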
    + +
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
    + +
    +
    +get_round_from_component_id(component_id)
    +

    Return the round according to the round of the component id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

    Return a list of impossible differentials if there are any; otherwise return an empty list.

    INPUT:

    • technique – string; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • solver – string; the name of the solver to use for the search

    • scenario – string; the type of impossible differentials to search, single-key or related-key
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

    Return True if the cipher is resistant against algebraic attack.

    +

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds
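    A possible call sketch, assuming the FancyBlockCipher instance used in other examples on this page; whether the computation finishes within the timeout depends on the machine, so no output is asserted:

    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    sage: fancy = FancyBlockCipher(number_of_rounds=1)
    sage: fancy.is_algebraically_secure(30)  # doctest: +SKIP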
    + +
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

    Return True if the cipher is SPN.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    • r – integer; round index

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    • file_name – string; a python string representing a valid file name

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: import os
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

    Print the python code that implements the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    • file_name – string; name of the output file

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    Possible cipher inputs are:

    • plaintext

    • key

    • tweak

    • initialization vector

    • nonce

    • constant

    • etc.

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    • number_of_tests – integer (default: 5); number of tests to execute

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

    Test the cipher with a list of test vector inputs and a list of test vector outputs.

    +

    INPUT:

    • list_of_test_vectors_input – list; list of input testing vectors

    • list_of_test_vectors_output – list; list of the expected outputs of the corresponding input testing vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    OUTPUT:

    • test_result – output of the testing. True if cipher.evaluate(input) == output for every input test vector, and False otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    + +

    Return a list of zero-correlation linear approximations if there are any; otherwise return an empty list.

    INPUT:

    • technique – string; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • solver – string; the name of the solver to use for the search
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/ciphers/block_ciphers/tea_block_cipher.html b/docs/build/html/ciphers/block_ciphers/tea_block_cipher.html index d1176c5f..1128e306 100644 --- a/docs/build/html/ciphers/block_ciphers/tea_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/tea_block_cipher.html @@ -1,23 +1,24 @@ - + - Tea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Tea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Tea block cipher

    +

    Tea block cipher

    class TeaBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0, right_shift_amount=5, left_shift_amount=4)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the TeaBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -228,94 +229,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -354,185 +272,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -565,53 +322,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -656,7 +366,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -670,11 +380,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -735,28 +448,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -792,35 +483,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -839,50 +501,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1148,43 +766,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1341,24 +922,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1493,38 +1056,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1601,70 +1132,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1689,13 +1166,13 @@

    Navigation

    This Page

    @@ -1713,7 +1190,7 @@

    Quick search

    - +
    @@ -1728,10 +1205,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1739,7 +1216,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/block_ciphers/threefish_block_cipher.html b/docs/build/html/ciphers/block_ciphers/threefish_block_cipher.html index 1d94e3f7..221a2cb9 100644 --- a/docs/build/html/ciphers/block_ciphers/threefish_block_cipher.html +++ b/docs/build/html/ciphers/block_ciphers/threefish_block_cipher.html @@ -1,23 +1,24 @@ - + - Threefish block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Threefish block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Threefish block cipher

    +

    Threefish block cipher

    class ThreefishBlockCipher(block_bit_size=256, key_bit_size=256, tweak_bit_size=128, number_of_rounds=0)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the ThreefishBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -231,94 +232,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -357,185 +275,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
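    Read together, these four definitions can be summarised by a small sketch operating on a flip-probability vector p (one plausible reading of the text above; the library's exact formulas may differ):

    import numpy as np

    def avalanche_quantities(p, bias=0.05):
        p = np.asarray(p, dtype=float)
        dependence = int(np.count_nonzero(p > 0))                            # output bits that flip at all
        dependence_uniform = int(np.count_nonzero(np.abs(p - 0.5) <= bias))  # bits flipping with prob. in [1/2-bias, 1/2+bias]
        weight = float(p.sum())                                              # expected Hamming weight of the output difference
        with np.errstate(divide="ignore", invalid="ignore"):
            h = -(p * np.log2(p) + (1 - p) * np.log2(1 - p))                 # per-bit binary entropy
        entropy = float(np.nan_to_num(h).sum())
        return dependence, dependence_uniform, weight, entropy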

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • avalanche_dependence_uniform_bias – float; defines the range in which the probability of flipping should lie

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_value – float; threshold value used to express the input difference

    • number_of_samples – integer; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    • -
    -

    OUTPUT:

    -
    -
      -
    • A Python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

    • gf_number_samples – integer; number of vectors used to approximate gf_2

    • input_bit – integer (default: None); input bit position to be analyzed

    • output_bits – list (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -568,53 +325,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range in which the probability of flipping should lie

    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]
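    The threshold parameters listed in INPUT above all express the same kind of interval check; as a rough sketch (an illustrative helper, not part of the library, with target being block_bit_size for the dependence and entropy criteria and block_bit_size/2 for the weight criterion):

    def satisfies(d, target, bias):
        # the criterion is met when d lies within +/- bias of the target value
        return target - bias <= d <= target + bias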

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -659,7 +369,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    @@ -673,11 +383,14 @@

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputs – boolean (default: False)

    • +
    • intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
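    The rest of the doctest is cut off by the diff context. A plausible continuation, based on the parameter description above (the array shapes and the evaluate_api round-trip shown here are assumptions, not taken from the original page), would be:

    sage: plaintext = np.random.randint(256, size=(4, 10), dtype=np.uint8)  # 4 bytes per input, 10 inputs
    sage: key = np.random.randint(256, size=(8, 10), dtype=np.uint8)        # 8 bytes per input, 10 inputs
    sage: out = speck.evaluate_vectorized([plaintext, key])                 # doctest: +SKIP
    sage: # With evaluate_api=True the result is expected to agree with evaluate():
    sage: x = [0x12345678, 0x1122334455667788]
    sage: speck.evaluate(x) == speck.evaluate_vectorized(x, evaluate_api=True)  # doctest: +SKIP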
    @@ -738,28 +451,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher, True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -795,35 +486,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -842,50 +504,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests

    • -
    • -
      difference_positions – list (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_names – list (default: None); names of the criteria to observe

      The default value is equivalent to picking all four criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1156,43 +774,6 @@

    Navigation

    mix(data, d)
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); for how long the neural network is trained (number of epochs)

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
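    The doctest above is commented out, presumably because training is slow; run directly, the same call would look like this (a sketch; expect a long runtime):

    sage: speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples=10)  # doctest: +SKIP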
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); for how long the neural network is trained (number of epochs)

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1349,24 +930,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_results – list; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1501,38 +1064,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
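    To restrict the optimizer to plaintext differences only, difference_positions can be passed explicitly; the sketch below assumes the two cipher inputs are plaintext and key, in that order, as in the other Speck examples on this page:

    sage: cipher.run_autond_pipeline(difference_positions=[True, False], optimizer_generations=5)  # doctest: +SKIP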

    -
    -
    set_file_name(file_name)
    @@ -1614,70 +1145,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (which may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    word_permutation(data)
    @@ -1707,13 +1184,13 @@

    Navigation

    diff --git a/docs/build/html/ciphers/block_ciphers/twofish_block_cipher.html b/docs/build/html/ciphers/block_ciphers/twofish_block_cipher.html
    index 6ed8dc61..e5df27fe 100644
    --- a/docs/build/html/ciphers/block_ciphers/twofish_block_cipher.html
    +++ b/docs/build/html/ciphers/block_ciphers/twofish_block_cipher.html
    -    Twofish block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +    Twofish block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Twofish block cipher

    +

    Twofish block cipher

    class TwofishBlockCipher(key_length=128, number_of_rounds=16)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the TwofishBlockCipher class.

    INPUT:

      @@ -228,94 +229,11 @@

      Navigation

      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeout – integer; the timeout for the Gröbner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -354,185 +272,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

    • is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

    • is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

    • is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -565,53 +322,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range in which the probability of flipping should lie

    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -656,7 +366,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -670,11 +380,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -735,28 +448,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -792,35 +483,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -839,50 +501,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1153,43 +771,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1346,24 +927,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1498,38 +1061,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1606,70 +1137,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (which may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
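    The only contract data_generator has to satisfy is the one stated above: given a number of rounds
    and a number of samples, return (X, Y) with one row of X per sample and a label vector Y. A hedged
    sketch of the same generator written as a plain function, with the contract spelled out in comments
    (the wrapping of get_differential_dataset is just for illustration)::

        sage: def my_data_generator(nr, samples):                              # doctest: +SKIP
        ....:     # X: numpy matrix, one row per sample; Y: 0/1 labels (real pairs vs random pairs)
        ....:     return get_differential_dataset(cipher, [0x400000, 0],
        ....:                                     number_of_rounds=nr, samples=samples)
        sage: cipher.train_neural_distinguisher(my_data_generator, starting_round=5,  # doctest: +SKIP
        ....:     neural_network=get_neural_network('gohr_resnet', input_size=64))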

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/block_ciphers/xtea_block_cipher.html b/docs/build/html/ciphers/block_ciphers/xtea_block_cipher.html
index 060b0469..b2af6f57 100644
--- a/docs/build/html/ciphers/block_ciphers/xtea_block_cipher.html
+++ b/docs/build/html/ciphers/block_ciphers/xtea_block_cipher.html
-    Xtea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Xtea block cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Xtea block cipher

    class XTeaBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=0, right_shift_amount=5, left_shift_amount=4)

-    Bases: claasp.cipher.Cipher
+    Bases: Cipher

    Construct an instance of the XTeaBlockCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -228,94 +229,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
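    Continuing the example above, the per-round results can be read directly from the returned
    dictionary, for instance the pass/fail flags of the two rounds::

        sage: d['test_results']['test_passed']  # long time
        [False, False]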
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -354,185 +272,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit
    difference, for a given round. If the worst avalanche dependence for a certain round is close to the
    output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche
    dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in
    \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit
    difference, for a given round. If the worst avalanche dependence uniform for a certain round is close
    to the output bit size with respect to a certain threshold, we say that the cipher satisfies the
    avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input
    bit difference, for a given round. If the avalanche weights of all the input bit differences for a
    certain round are close to half of the output bit size with respect to a certain threshold, we say
    that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an
    input bit difference, for a given round. If the strict avalanche entropy of all the input bit
    differences for a certain round is close to the output bit size with respect to a certain threshold,
    we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -565,53 +322,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -656,7 +366,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and
    cipher_inputs[1] the second.

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows,
      m columns, with m the number of inputs to evaluate)
    -• intermediate_outputs – boolean (default: False)
    +• intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of
      each component
    +• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate
    +  function) and returns integer outputs; it is expected that
    +  cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES::

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
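    A hedged usage sketch of the evaluate_api code path, continuing the example above; the input order
    [plaintext, key] and the Speck32/64 reference test vector are assumptions for illustration::

        sage: x = [0x6574694c, 0x1918111009080100]                                 # doctest: +SKIP
        sage: speck.evaluate(x) == speck.evaluate_vectorized(x, evaluate_api=True)  # doctest: +SKIP
        True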
    @@ -735,28 +448,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -792,35 +483,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    “The design of Xoodoo and Xoofff” [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -839,50 +501,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests
    • difference_positions – list (default: None); positions of the differences to inject.
      The default value is equivalent to picking one of the worst positions for a difference and the
      average value.
    • criterion_names – list (default: None); names of the criteria to observe.
      The default value is equivalent to picking all 4 criteria:
      “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”,
      “avalanche_entropy_vectors”, “avalanche_weight_vectors”
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1148,43 +766,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1341,24 +922,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1493,38 +1056,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1601,70 +1132,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/hash_functions/blake2_hash_function.html b/docs/build/html/ciphers/hash_functions/blake2_hash_function.html
index 68c264ee..4e23190d 100644
--- a/docs/build/html/ciphers/hash_functions/blake2_hash_function.html
+++ b/docs/build/html/ciphers/hash_functions/blake2_hash_function.html
-    Blake2 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Blake2 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Blake2 hash function

    class Blake2HashFunction(block_bit_size=1024, state_bit_size=1024, number_of_rounds=0, word_size=64, permutations=None, rot_amounts=None)

-    Bases: claasp.cipher.Cipher
+    Bases: Cipher

    Construct an instance of the Blake2HashFunction class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    @@ -231,94 +232,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -362,185 +280,24 @@

    Navigation

    column_step(data_word_ids, data_word_ranges, state_word_ids, state_word_ranges, r)
    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -593,53 +350,6 @@

    Navigation

    diagonal_step(data_word_ids, data_word_ranges, state_word_ids, state_word_ranges, r)
    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -684,7 +394,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and
    cipher_inputs[1] the second.

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows,
      m columns, with m the number of inputs to evaluate)
    -• intermediate_outputs – boolean (default: False)
    +• intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of
      each component
    +• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate
    +  function) and returns integer outputs; it is expected that
    +  cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES::

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -763,28 +476,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -820,35 +511,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -867,50 +529,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: h[:20]
'\documentclass[12pt]'

sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
sage: ascon = AsconPermutation(number_of_rounds=4)
sage: d = ascon.diffusion_tests(number_of_samples=100) # long
sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
sage: cipher = XoodooPermutation(number_of_rounds=4)
sage: d = cipher.diffusion_tests(number_of_samples=100) # long
sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
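Since the method only returns LaTeX source, the sketch below (a non-authoritative example; the output file name is arbitrary) shows how the string h produced above could be written to a file for later compilation:

sage: with open("avalanche_heatmaps.tex", "w") as f:   # hypothetical output path
....:     _ = f.write(h)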
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1176,43 +794,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

• nb_samples – integer (default: 10000); number of samples the neural network is trained with
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
• number_of_epochs – integer (default: 10); number of training epochs for the neural network

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

• nb_samples – integer (default: 10000); number of samples the neural network is trained with
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
• number_of_epochs – integer (default: 10); number of training epochs for the neural network
• diff – list (default: [0x01]); list of input differences

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
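The documented call above is commented out because training is slow. A minimal invocation sketch using only the documented signature (the small parameter values are illustrative, chosen purely to keep the run short; accuracies vary between runs):

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher(number_of_rounds=3)
sage: results = cipher.neural_network_differential_distinguisher_tests(nb_samples=10, number_of_epochs=1, diff=[0x01]) # random, long time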
    property number_of_rounds
    @@ -1369,24 +950,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

• component_analysis_results – list; results of the component analysis method

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
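Assuming the returned object behaves like a standard matplotlib figure (an assumption, not stated explicitly above), it can be saved instead of displayed; the file name below is illustrative:

sage: fig.savefig("aes_component_analysis.png", dpi=150) # doctest: +SKIP  (hypothetical output path)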
    print_evaluation_python_code(verbosity=False)
    @@ -1521,38 +1084,6 @@

    Navigation

    property rounds_as_list
    -
    -
run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

Runs the AutoND pipeline ([BGHR2023]):

• Find an input difference for the inputs set to True in difference_positions using an optimizer
• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

INPUT:

• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
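As a usage sketch, the search can be restricted to plaintext differences only (one boolean per cipher input, as described above); the reduced optimizer settings are illustrative and only meant to shorten the runtime:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline(difference_positions=[True, False],   # allow differences in the plaintext, keep the key fixed
....:     optimizer_generations=5, number_of_epochs=1, verbose=True)   # long time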
    set_file_name(file_name)
    @@ -1634,70 +1165,16 @@

    Navigation

    -
    -
train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr RESNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher
• number_of_rounds – integer; number of rounds to analyze
• depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper
• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766
train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.
• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
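Before launching a long training run, a small sanity-check sketch (sample count purely illustrative) can confirm that a custom data_generator returns one row of X and one label of Y per sample, as required above:

sage: X, Y = data_generator(5, 100)    # 100 samples at round 5
sage: X.shape, Y.shape                 # one row per sample in X, one label per sample in Y  # random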
    property type
diff --git a/docs/build/html/ciphers/hash_functions/blake_hash_function.html b/docs/build/html/ciphers/hash_functions/blake_hash_function.html
index 2b5bc76d..6d4668c2 100644
--- a/docs/build/html/ciphers/hash_functions/blake_hash_function.html
+++ b/docs/build/html/ciphers/hash_functions/blake_hash_function.html
-    Blake hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Blake hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Blake hash function


    class BlakeHashFunction(block_bit_size=512, state_bit_size=512, number_of_rounds=0, word_size=32, permutations=None, rot_amounts=None, constants=None)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the BlakeHashFunction class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    @@ -241,94 +242,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

• timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUTS: a dictionary with the following keys:

• npolynomials – number of polynomials
• nvariables – number of variables
• timeout – timeout in seconds
• pass – whether the algebraic test passes w.r.t. the given timeout

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: d = speck.algebraic_tests(5)  # long time
sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
....: {'number_of_variables': [304, 800],
....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
True
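As a follow-up sketch (the key names are taken from the dictionary shown in the example above), the per-round pass/fail flags can be read directly:

sage: d['test_results']['test_passed']  # long time
[False, False]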
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

• tests_configuration – python dictionary

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
sage: analysis = sp.analyze_cipher(tests_configuration)
sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
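A follow-up sketch, assuming the result mirrors the keys of tests_configuration (an assumption; only the "diffusion_tests" key is shown explicitly above), reads the component analysis part of the same run:

sage: analysis["component_analysis_tests"] # random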
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

• nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: apvs["key"]["round_output"][31][0] # random
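Since the inputs considered include the plaintext as well as the key, a follow-up sketch (same indexing convention as the note above; values are sample-dependent) inspects a plaintext-difference vector:

sage: apvs["plaintext"]["round_output"][0][0] # random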
    cipher_inverse()
    @@ -372,185 +290,24 @@

    Navigation

    column_step(data_word_ids, data_word_ranges, state_word_ids, state_word_ranges, r)
    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: len(result)
9
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in \(\left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right]\), with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weight of all the input bit differences for a certain round is close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

• all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()
• avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
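A follow-up sketch reading another criterion from the same dictionary; the criterion name is taken from the list documented for generate_heatmap_graphs_for_avalanche_tests and is assumed to apply here as well:

sage: d["key"]["round_output"][0][0]["avalanche_entropy_vectors"] # random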
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

• lambda_value – float; threshold value used to express the input difference
• number_of_samples – integer; number of samples used to compute the continuous avalanche factor

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2)
sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
0.0
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric
• gf_number_samples – integer; number of vectors used to approximate gf_2

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2) # long time
sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
True
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

• continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor
• threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor
• continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric
• continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2
• continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric
• continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2
• is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute continuous_avalanche_factor
• is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute continuous_neutrality_measure
• is_diffusion_factor – boolean (default: True); flag indicating whether to compute diffusion_factor

    OUTPUT:

• A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=1) # long time
sage: output = speck_cipher.continuous_diffusion_tests() # long time
sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
True
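Because each metric has an is_* flag, a sketch (flag names taken from the INPUT list above) can compute only the continuous avalanche factor and skip the two costlier metrics; the result layout is assumed to mirror the continuous_neutrality_measure entry shown above:

sage: output = speck_cipher.continuous_diffusion_tests(is_continuous_neutrality_measure=False, is_diffusion_factor=False) # long time
sage: output['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value'] # random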
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric
• gf_number_samples – integer; number of vectors used to approximate gf_2
• input_bit – integer (default: None); input bit position to be analyzed
• output_bits – list (default: None); output bit positions to be analyzed

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
True
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -608,53 +365,6 @@

    Navigation

    diagonal_step(data_word_ids, data_word_ranges, state_word_ids, state_word_ranges, r)
    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

• number_of_samples – integer (default: 5); used to compute the estimated probability of flipping
• avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be
• avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
• avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary
• run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
• run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary
• run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

    -

diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]

    -
    -

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100)
sage: d["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
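A follow-up sketch (key names taken from the note and example above) reads the avalanche entropy entry for the plaintext input of the same run; the vector itself is sample-dependent:

sage: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][0]["output_vectors"][0] # random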
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -699,7 +409,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second.

@@ -713,11 +423,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
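The surrounding hunk cuts the example off here; as a separate sketch based only on the stated equivalence between evaluate and evaluate_vectorized(..., evaluate_api=True), with illustrative 32-bit plaintext and 64-bit key values:

sage: x = [0x01234567, 0x89abcdef76543210]   # plaintext, key (illustrative values)
sage: speck.evaluate(x) == speck.evaluate_vectorized(x, evaluate_api=True)
True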
    @@ -778,28 +491,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

• difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed
• initial_population – integer (default: 32); parameter of the evolutionary algorithm
• number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm
• nb_samples – integer (default: 10000); number of samples for testing each input difference
• previous_generation – (default: None); optional initial table of differences to try
• verbose – boolean (default: False); verbosity

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -835,35 +526,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

• nb_samples – integer; number of samples
• output_absolute_path – string; absolute path of the output file

    EXAMPLES:

sage: import inspect
sage: import claasp
sage: import os.path
sage: tii_path = inspect.getfile(claasp)
sage: tii_dir_path = os.path.dirname(tii_path)
sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
sage: identity = IdentityBlockCipher()
sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
True
sage: import os
sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -882,50 +544,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
• avalanche_results – dictionary; results of the avalanche tests
• difference_positions – list (default: None); positions of the differences to inject.
  The default value is equivalent to picking one of the worst positions for a difference and the average value.
• criterion_names – list (default: None); names of the criteria to observe.
  The default value is equivalent to picking all 4 criteria:
  - “avalanche_dependence_vectors”
  - “avalanche_dependence_uniform_vectors”
  - “avalanche_entropy_vectors”
  - “avalanche_weight_vectors”

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: h[:20]
'\documentclass[12pt]'

sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
sage: ascon = AsconPermutation(number_of_rounds=4)
sage: d = ascon.diffusion_tests(number_of_samples=100) # long
sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
sage: cipher = XoodooPermutation(number_of_rounds=4)
sage: d = cipher.diffusion_tests(number_of_samples=100) # long
sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1191,43 +809,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

• nb_samples – integer (default: 10000); number of samples the neural network is trained with
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
• number_of_epochs – integer (default: 10); number of training epochs for the neural network

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

• nb_samples – integer (default: 10000); number of samples the neural network is trained with
• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
• number_of_epochs – integer (default: 10); number of training epochs for the neural network
• diff – list (default: [0x01]); list of input differences

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
    @@ -1384,24 +965,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
• component_analysis_results – list; results of the component analysis method

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1536,38 +1099,6 @@

    Navigation

    property rounds_as_list
    -
    -
run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

Runs the AutoND pipeline ([BGHR2023]):

• Find an input difference for the inputs set to True in difference_positions using an optimizer
• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

INPUT:

• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
    set_file_name(file_name)
    @@ -1649,70 +1180,16 @@

    Navigation

    -
    -
train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr RESNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher
• number_of_rounds – integer; number of rounds to analyze
• depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper
• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766
train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.
• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    +
    + +
    +
    @@ -1737,13 +1214,13 @@

    Navigation

    This Page

    @@ -1761,7 +1238,7 @@

    Quick search

    - +
    @@ -1776,10 +1253,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1787,7 +1264,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/hash_functions/md5_hash_function.html b/docs/build/html/ciphers/hash_functions/md5_hash_function.html index f72b68db..699fe05d 100644 --- a/docs/build/html/ciphers/hash_functions/md5_hash_function.html +++ b/docs/build/html/ciphers/hash_functions/md5_hash_function.html @@ -1,23 +1,24 @@ - + - Md5 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Md5 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Md5 hash function

    +

    Md5 hash function

    This module has been coded following the original RFC 1321. Every variable name has been chosen to strictly adhere to the RFC.

    The input is named key because the hash function MD5 can be seen like a @@ -64,7 +65,7 @@

    Navigation

    class MD5HashFunction(word_size=32, number_of_rounds=64)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Returns a cipher object of MD5.

    Warning

    @@ -290,94 +291,11 @@

    Navigation

    add_xor_component_in_md5(component_0, component_1)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -416,185 +334,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: len(result)
9
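Since the result is documented as a list of dictionaries, a tiny follow-up sketch (no assumptions beyond that) lists the fields reported for the first operation; the exact keys depend on the cipher:

sage: sorted(result[0].keys()) # random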
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -627,53 +384,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
• number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

• avalanche_dependence_uniform_bias – float (default: 0.05); define the range where the probability of flipping should be

• avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

• avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

• run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

• run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

• run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

All four *_criterion_threshold parameters encode the same kind of acceptance check; a short sketch of it is given after the example below.
    -
    -

    Note

    -

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
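The threshold parameters above all follow the same pattern; the following is a minimal sketch of that acceptance check (not a CLAASP API, the helper name and the numbers are purely illustrative):

sage: def criterion_satisfied(worst_value, reference, bias):  # hypothetical helper, not part of the library
....:     return reference - bias <= worst_value <= reference + bias
sage: criterion_satisfied(15.2, 16, 1)      # e.g. avalanche dependence checked against block_bit_size = 16
True
sage: criterion_satisfied(7.9, 16/2, 0.01)  # the avalanche weight is checked against block_bit_size/2
False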
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -718,7 +428,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -732,11 +442,14 @@

    Navigation

• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

- • intermediate_outputs – boolean (default: False)

+ • intermediate_output – boolean (default: False)

• verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+ • evaluate_api – boolean (default: False); if set to True, takes integer inputs and returns integer outputs (as the evaluate function); it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True (see the sketch after the example below)

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
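As a sketch of the evaluate_api flag described above (this continues the object built in the example and only illustrates the stated contract; the input values are arbitrary and not a doctest from the library):

sage: plaintext, key = 0x6574694c, 0x1918111009080100   # illustrative Speck-32/64 inputs
sage: speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)
True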
    @@ -797,28 +510,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
• difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

• initial_population – integer (default: 32); parameter of the evolutionary algorithm

• number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

• nb_samples – integer (default: 10); number of samples for testing each input difference

• previous_generation – (default: None); optional: initial table of differences to try

• verbose – boolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -854,35 +545,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
• nb_samples – integer; number of samples

• output_absolute_path – string; absolute path of the output report file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -901,50 +563,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file; a short sketch of the latter follows the examples below.

    -

    INPUT:

    -
      -
• avalanche_results – dictionary; results of the avalanche tests

• difference_positions – list (default: None); positions of the differences to inject.
  The default value is equivalent to picking one of the worst positions for a difference and the average value.

• criterion_names – list (default: None); names of the criteria to observe.
  The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
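Since only a LaTeX string is returned, one still has to write it out and compile it; a minimal sketch (the output file name is arbitrary, and compilation with pdflatex happens outside of Sage):

sage: with open("avalanche_heatmaps.tex", "w") as f:   # arbitrary path
....:     _ = f.write(h)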
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1215,43 +833,6 @@

    Navigation

    md5_step(a, b, c, d, k, s, i, function, X, T)
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

• diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1408,24 +989,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1560,38 +1123,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES::

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1668,70 +1199,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

EXAMPLES::

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds and a number of samples, which returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose – boolean (default: False); verbosity

EXAMPLES::

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1756,13 +1233,13 @@


diff --git a/docs/build/html/ciphers/hash_functions/sha1_hash_function.html b/docs/build/html/ciphers/hash_functions/sha1_hash_function.html
index c717f218..180d806d 100644
--- a/docs/build/html/ciphers/hash_functions/sha1_hash_function.html
+++ b/docs/build/html/ciphers/hash_functions/sha1_hash_function.html
@@ -1,23 +1,24 @@
-    Sha1 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sha1 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Sha1 hash function

    +

    Sha1 hash function

    This module has been coded following the original RFC 3174. Every variable name has been chosen to strictly adhere to the RFC.

    The input is named key because the hash function SHA-1 can be seen like a @@ -64,7 +65,7 @@

    Navigation

    class SHA1HashFunction(word_size=32, number_of_rounds=80)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Returns a cipher object of SHA1.
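A minimal usage sketch (not part of the original page; the import path is inferred from the module location, and the single padded message block is supplied as the one input, which this module names key):

sage: from claasp.ciphers.hash_functions.sha1_hash_function import SHA1HashFunction
sage: sha1 = SHA1HashFunction()
sage: padded_block = (0x61626380 << 448) | 24    # "abc" padded to one 512-bit block, as in RFC 3174
sage: digest = sha1.evaluate([padded_block])     # evaluation follows the generic Cipher API documented below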

    Warning

    @@ -249,94 +250,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
• timeout – integer; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
• nb_samples – integer; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
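The description above can be read as a Monte Carlo estimate: flip one input bit, re-evaluate, and average the per-bit output differences over many samples. A standalone toy sketch of that estimate (an 8-bit stand-in function, not the library's implementation):

sage: import numpy as np
sage: rng = np.random.default_rng(0)
sage: def toy_round(bits):                                   # stand-in for the cipher; any bit-valued map works
....:     return np.roll(bits, 1, axis=-1) ^ bits
sage: samples = rng.integers(0, 2, size=(100, 8))            # 100 random 8-bit toy inputs
sage: flipped = samples ^ np.eye(8, dtype=int)[0]            # inject a difference in input bit 0
sage: apv = (toy_round(samples) ^ toy_round(flipped)).mean(axis=0)  # entry i estimates P(output bit i flips)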
    cipher_inverse()
    @@ -375,190 +293,29 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in \(\left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right]\), with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weight of all the input bit differences for a certain round is close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
• all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

• avalanche_dependence_uniform_bias – float; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    -
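The four criteria described above can be illustrated directly on a single avalanche probability vector; a small standalone sketch (toy numbers, simplified, not the library code):

sage: from math import log2
sage: p = [0.50, 0.48, 0.03, 0.55]        # toy avalanche probability vector, one entry per output bit
sage: bias = 0.2
sage: dependence = sum(1 for q in p if q > 0)                                  # output bits that flip at all
sage: dependence_uniform = sum(1 for q in p if 0.5 - bias <= q <= 0.5 + bias)  # bits flipping with probability close to 1/2
sage: weight = sum(p)                                                          # expected Hamming weight of the output difference
sage: entropy = -sum(q * log2(q) + (1 - q) * log2(1 - q) for q in p)           # strict avalanche entropy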
    compute_temp_and_s_30_b(A, B, E, ft_B_C_D, K, W)
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -591,53 +348,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -682,7 +392,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -696,11 +406,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -761,28 +474,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -818,35 +509,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -865,50 +527,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1174,43 +792,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1367,24 +948,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1534,38 +1097,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES::

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    schedule(W, t)
    @@ -1647,70 +1178,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1735,13 +1212,13 @@


diff --git a/docs/build/html/ciphers/hash_functions/sha2_hash_function.html b/docs/build/html/ciphers/hash_functions/sha2_hash_function.html
index dcad7ba7..e0016cc8 100644
--- a/docs/build/html/ciphers/hash_functions/sha2_hash_function.html
+++ b/docs/build/html/ciphers/hash_functions/sha2_hash_function.html
@@ -1,23 +1,24 @@
-    Sha2 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sha2 hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Sha2 hash function

    +

    Sha2 hash function

    This module has been coded following the original RFC 6234. Every variable name has been chosen to strictly adhere to the RFC.

    The input is named key because the hash functions in the SHA-2 family can be @@ -65,7 +66,7 @@

    Navigation

    class SHA2HashFunction(output_bit_size=256, number_of_rounds=64)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Returns a cipher object of SHA-224, SHA-256, SHA-384 or SHA-512.
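A minimal usage sketch (not part of the original page; it assumes, from the constructor shown above, that the variant is selected by output_bit_size and that the SHA-512 variant is built with 80 rounds):

sage: from claasp.ciphers.hash_functions.sha2_hash_function import SHA2HashFunction
sage: sha256 = SHA2HashFunction()                                           # defaults correspond to SHA-256
sage: sha512 = SHA2HashFunction(output_bit_size=512, number_of_rounds=80)   # SHA-512 variant
sage: sha512.number_of_rounds
80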

    Warning

    @@ -261,94 +262,11 @@

    Navigation

    add_xor_component_sha2(component_0, component_1)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -387,24 +305,6 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    @@ -420,54 +320,6 @@

    Navigation

    compute_ch(x, y, z)
    -
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    -
    compute_maj(x, y, z)
    @@ -479,113 +331,18 @@

    Navigation

    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -618,53 +375,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -709,7 +419,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -723,11 +433,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
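A minimal sketch of the evaluate_api behaviour described above, continuing this example and assuming inputs are given as the integer list [plaintext, key] as with evaluate (the values below are arbitrary placeholders, not test vectors):

sage: x = [0x01234567, 0x0011223344556677]  # arbitrary 32-bit plaintext and 64-bit key
sage: speck.evaluate(x) == speck.evaluate_vectorized(x, evaluate_api=True)  # expected to be True, per the note above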
    @@ -788,28 +501,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -845,35 +536,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -892,50 +554,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all of the 4 criteria:
- “avalanche_dependence_vectors”
- “avalanche_dependence_uniform_vectors”
- “avalanche_entropy_vectors”
- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1201,43 +819,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1394,24 +975,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1551,38 +1114,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
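If differences should only be injected into some of the cipher inputs, the difference_positions argument can be restricted accordingly (a hedged sketch; [True, False] assumes the inputs are plaintext then key, as in the find_good_input_difference_for_neural_distinguisher example, and number_of_epochs=1 is only meant to keep the run short):

sage: cipher.run_autond_pipeline(difference_positions=[True, False], number_of_epochs=1)  # hypothetical quick run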

    -
    -
    schedule(W, t)
    @@ -1664,70 +1195,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (this may reduce the accuracy of the obtained distinguisher).
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds: 0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant
• verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    - + diff --git a/docs/build/html/ciphers/hash_functions/whirlpool_hash_function.html b/docs/build/html/ciphers/hash_functions/whirlpool_hash_function.html index 9877cc76..66d6380e 100644 --- a/docs/build/html/ciphers/hash_functions/whirlpool_hash_function.html +++ b/docs/build/html/ciphers/hash_functions/whirlpool_hash_function.html @@ -1,23 +1,24 @@ - + - Whirlpool hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Whirlpool hash function — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Whirlpool hash function

    +

    Whirlpool hash function

    class WhirlpoolHashFunction(number_of_rounds=10, word_size=8, state_size=8)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Returns a cipher object of Whirlpool hash function.

    @@ -230,94 +231,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
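Individual entries of the returned dictionary can then be read directly; for instance, using the result d from the example above:

sage: d['test_results']['test_passed']
[False, False]
sage: d['input_parameters']['timeout']
5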
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -356,182 +274,11 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    -
    -
    convert_to_compound_xor_cipher()
    @@ -547,6 +294,11 @@

    Navigation

    create_mix_row_components(shift_column_components)
    +
    +
    +create_networx_graph_from_input_ids()
    +
    +
    create_round_constant_component(round_number)
    @@ -557,6 +309,11 @@

    Navigation

    create_shift_column_components(sboxes_components, word_size)
    +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    +
    property current_round
    @@ -587,53 +344,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -678,7 +388,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -692,11 +402,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -757,28 +470,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -814,35 +505,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -861,50 +523,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all of the 4 criteria:
- “avalanche_dependence_vectors”
- “avalanche_dependence_uniform_vectors”
- “avalanche_entropy_vectors”
- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1170,43 +788,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1363,24 +944,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1515,38 +1078,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 40); number of training epochs
• verbose – boolean (default: False); verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1623,70 +1154,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (this may reduce the accuracy of the obtained distinguisher).
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds: 0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer (default: 10**7); number of samples used for training
• testing_samples – integer (default: 10**6); number of samples used for testing
• pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant
• verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    - + diff --git a/docs/build/html/ciphers/permutations/ascon_permutation.html b/docs/build/html/ciphers/permutations/ascon_permutation.html index f4e6c6c6..6bf2aec4 100644 --- a/docs/build/html/ciphers/permutations/ascon_permutation.html +++ b/docs/build/html/ciphers/permutations/ascon_permutation.html @@ -1,23 +1,24 @@ - + - Ascon permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Ascon permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Ascon permutation

    +

    Ascon permutation

    class AsconPermutation(number_of_rounds=12)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the AsconPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -224,94 +225,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeout – integer; the timeout for the Gröbner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
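    A brief follow-up sketch (not part of the original documentation; the key names are taken from the expected
    dictionary shown above) of how the result could be inspected::

    sage: d['test_results']['test_passed']  # doctest: +SKIP
    [False, False]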
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -350,185 +268,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference,
    for a given round. If the worst avalanche dependence for a certain round is close to the output bit size
    with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion
    for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in
    \(\left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right]\), with respect to an input bit
    difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to
    the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche
    dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit
    difference, for a given round. If the avalanche weights of all the input bit differences for a certain
    round are close to half of the output bit size with respect to a certain threshold, we say that the cipher
    satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input
    bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a
    certain round is close to the output bit size with respect to a certain threshold, we say that the cipher
    satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -561,53 +318,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -652,7 +362,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
    @@ -666,11 +376,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)
      and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True)
      is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
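    Continuing the example above, a minimal sketch (not part of the original documentation; it assumes the byte layout
    described in INPUT, with one byte per row and one sample per column, and the integer-based call implied by the
    evaluate_api flag) of the two calling conventions::

    sage: plaintexts = np.random.randint(256, size=(4, 2), dtype=np.uint8)  # 32-bit plaintexts, 2 samples
    sage: keys = np.random.randint(256, size=(8, 2), dtype=np.uint8)        # 64-bit keys, 2 samples
    sage: out = speck.evaluate_vectorized([plaintexts, keys])               # byte-array call
    sage: out_int = speck.evaluate_vectorized([0x12345678, 0x1122334455667788], evaluate_api=True)  # doctest: +SKIP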
    @@ -731,28 +444,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher([True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -788,35 +479,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    “The design of Xoodoo and Xoofff” [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -835,50 +497,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors” and “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
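    As a usage note (not part of the original documentation; the output path is illustrative), the returned LaTeX
    string can be written to a .tex file and compiled separately::

    sage: with open("/tmp/avalanche_heatmaps.tex", "w") as latex_file:
    ....:     _ = latex_file.write(h)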
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1144,43 +762,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1337,24 +918,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1494,38 +1057,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions);
      if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position.
      The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
    • optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the
      quality of the optimizer, at the cost of a longer runtime
    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 40); number of training epochs
    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES::

    -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: cipher.run_autond_pipeline()
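    A minimal, hedged sketch (not part of the original documentation; the small parameter values are chosen only to
    keep the run short and are not recommended settings) restricting the search to plaintext differences::

    sage: cipher.run_autond_pipeline(difference_positions=[True, False], optimizer_generations=5, number_of_epochs=1)  # doctest: +SKIP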

    -
    -
    set_file_name(file_name)
    @@ -1602,70 +1133,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly
    modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES::

    -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: input_differences = [0x400000, 0]
    -sage: number_of_rounds = 5
    -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    -Validation accuracy at 5 rounds :0.9101160168647766
    -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds,
      and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector.
      To reproduce classical neural distinguisher results, one would use the example below.
    • starting_round – integer; number of rounds to analyze
    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one
      returned by the get_neural_network function of neural_network_tests
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains
      the model as long as the accuracy is statistically significant
    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES::

    -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    -sage: cipher = SpeckBlockCipher()
    -sage: input_differences = [0x400000, 0]
    -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    -sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
diff --git a/docs/build/html/ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation.html b/docs/build/html/ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation.html
index 3a47e80e..03a7a5d0 100644
--- a/docs/build/html/ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation.html
+++ b/docs/build/html/ciphers/permutations/ascon_sbox_sigma_no_matrix_permutation.html
@@ -1,23 +1,24 @@
-    Ascon sbox sigma no matrix permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Ascon sbox sigma no matrix permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    -

    Ascon sbox sigma no matrix permutation

    +

    Ascon sbox sigma no matrix permutation

    class AsconSboxSigmaNoMatrixPermutation(number_of_rounds=12)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the AsconSboxSigmaNoMatrixPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -224,94 +225,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeout – integer; the timeout for the Gröbner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -350,185 +268,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference,
    for a given round. If the worst avalanche dependence for a certain round is close to the output bit size
    with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion
    for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in
    \(\left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right]\), with respect to an input bit
    difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to
    the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche
    dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit
    difference, for a given round. If the avalanche weights of all the input bit differences for a certain
    round are close to half of the output bit size with respect to a certain threshold, we say that the cipher
    satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input
    bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a
    certain round is close to the output bit size with respect to a certain threshold, we say that the cipher
    satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -561,53 +318,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -652,7 +362,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
    @@ -666,11 +376,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)
      and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True)
      is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -731,28 +444,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher([True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -788,35 +479,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    “The design of Xoodoo and Xoofff” [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -835,50 +497,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors” and “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1144,43 +762,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1337,24 +918,6 @@

    Navigation

print_component_analysis_as_radar_charts(component_analysis_results)

Return a matplotlib object containing the radar charts of the component analysis test.

INPUT:

• component_analysis_results -- list; results of the component analysis method

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    property rounds_as_list
run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

Runs the AutoND pipeline ([BGHR2023]):

• Find an input difference for the inputs set to True in difference_positions using an optimizer
• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds, until the accuracy is no better than random guessing

INPUT:

• difference_positions -- list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples -- integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations -- integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
• training_samples -- integer (default: 10**7); number of samples used for training
• testing_samples -- integer (default: 10**6); number of samples used for testing
• number_of_epochs -- integer (default: 40); number of training epochs
• verbose -- boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
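A hedged usage sketch restricting the optimizer to plaintext differences via difference_positions (one boolean per cipher input; Speck has the inputs plaintext and key). The reduced optimizer_generations and number_of_epochs values are illustrative only:

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher()
# True allows differences in the plaintext, False forbids them in the key,
# which corresponds to the single-key setting described above.
cipher.run_autond_pipeline(difference_positions=[True, False],
                           optimizer_generations=5,
                           number_of_epochs=1)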
    set_file_name(file_name)
train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr's RESNet ([Go2019]).

INPUT:

• input_difference -- list of integers; the input difference, expressed as a list with one value per input to the cipher
• number_of_rounds -- integer; number of rounds to analyze
• depth -- integer (default: 1); the depth of the neural network, as defined in Gohr's paper
• word_size -- integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
• training_samples -- integer (default: 10**7); number of samples used for training
• testing_samples -- integer (default: 10**6); number of samples used for testing
• number_of_epochs -- integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766
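Assuming, as the doctest output suggests, that the call returns the validation accuracy, a hedged sketch of a quick run at a different round count (one epoch only, so the reported accuracy is not meaningful):

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher()
# Accuracy significantly above 0.5 would indicate a working distinguisher at this round count.
acc = cipher.train_gohr_neural_distinguisher([0x400000, 0], number_of_rounds=6,
                                             word_size=16, number_of_epochs=1)
print(acc)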
train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator -- function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round -- integer; number of rounds to analyze
• neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples -- integer (default: 10**7); number of samples used for training
• testing_samples -- integer (default: 10**6); number of samples used for testing
• pipeline -- boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.
• verbose -- boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type
diff --git a/docs/build/html/ciphers/permutations/ascon_sbox_sigma_permutation.html b/docs/build/html/ciphers/permutations/ascon_sbox_sigma_permutation.html
index a89e288a..a78b92a1 100644
--- a/docs/build/html/ciphers/permutations/ascon_sbox_sigma_permutation.html
+++ b/docs/build/html/ciphers/permutations/ascon_sbox_sigma_permutation.html

-Ascon sbox sigma permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Ascon sbox sigma permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Ascon sbox sigma permutation

    class AsconSboxSigmaPermutation(number_of_rounds=12)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the AsconSboxSigmaPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:
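The remaining INPUT entries fall outside this excerpt. A minimal construction sketch, assuming the module path mirrors the location of this documentation page (an assumption, not confirmed by the text):

from claasp.ciphers.permutations.ascon_sbox_sigma_permutation import AsconSboxSigmaPermutation

# number_of_rounds is the only parameter shown in the signature above (default: 12).
ascon = AsconSboxSigmaPermutation(number_of_rounds=4)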

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
algebraic_tests(timeout)

Return a dictionary explaining the result of the algebraic test.

INPUT:

• timeout -- integer; the timeout for the Grobner basis computation in seconds

OUTPUT: a dictionary with the following keys:

• npolynomials -- number of polynomials
• nvariables -- number of variables
• timeout -- timeout in seconds
• pass -- whether the algebraic test passes w.r.t. the given timeout

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: d = speck.algebraic_tests(5)  # long time
sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
....: {'number_of_variables': [304, 800],
....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
True
analyze_cipher(tests_configuration)

Generate a dictionary with the analysis of the cipher.

The analysis is related to the following tests:

• Diffusion Tests

INPUT:

• tests_configuration -- python dictionary

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
sage: analysis = sp.analyze_cipher(tests_configuration)
sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
avalanche_probability_vectors(nb_samples)

Return the avalanche probability vectors of each input bit difference for each round.

The inputs considered are plaintext, key, etc.

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

Note: apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

INPUT:

• nb_samples -- integer; used to compute the estimated probability of flipping

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: apvs["key"]["round_output"][31][0] # random
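A hedged sketch of how the nested structure described in the note above might be read; the indices are arbitrary and the layout follows the doctest:

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
apvs = speck.avalanche_probability_vectors(100)
# Probability vector for a difference injected in key bit 31, first occurrence of the
# round_output; entry j estimates the probability that output bit j flips.
vector = apvs["key"]["round_output"][31][0]
print(vector[0])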
    cipher_inverse()
component_analysis_tests()

Return a list of dictionaries, each one giving some properties of the cipher's operations.

INPUT:

• None

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: len(result)
9
    component_from(round_number, index)
compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

Return a python dictionary that contains the dictionaries corresponding to each criterion.

ALGORITHM:

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in [1/2 - bias, 1/2 + bias], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

Note: d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

INPUT:

• all_apvs -- dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()
• avalanche_dependence_uniform_bias -- float; define the range where the probability of flipping should be

See also: avalanche_probability_vectors() for the returned vectors.

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
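A hedged sketch of checking the avalanche dependence criterion by hand with the inequality block_bit_size - bias <= d <= block_bit_size + bias used in diffusion_tests; it assumes each entry of the returned vector is 0 or 1 (one per output bit), which is not stated explicitly in the text:

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
apvs = speck.avalanche_probability_vectors(100)
d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)

vector = d["key"]["round_output"][0][0]["avalanche_dependence_vectors"]
dependence = sum(vector)  # assumed: number of output bits that flip for this difference
block_bit_size, bias = 16, 0
criterion_satisfied = (block_bit_size - bias) <= dependence <= (block_bit_size + bias)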
continuous_avalanche_factor(lambda_value, number_of_samples)

Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

INPUT:

• lambda_value -- float; threshold value used to express the input difference
• number_of_samples -- integer; number of samples used to compute the continuous avalanche factor

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2)
sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
0.0
continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

INPUT:

• beta_number_of_samples -- integer; number of samples used to compute the continuous measure metric
• gf_number_samples -- integer; number of vectors used to approximate gf_2

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2) # long time
sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
True
continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

INPUT:

• continuous_avalanche_factor_number_of_samples -- integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor
• threshold_for_avalanche_factor -- float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor
• continuous_neutral_measure_beta_number_of_samples -- integer (default: 10); number of samples used to compute the continuous measure metric
• continuous_neutral_measure_gf_number_samples -- integer (default: 10); number of vectors used to approximate gf_2
• continuous_diffusion_factor_beta_number_of_samples -- integer (default: 10); number of samples used to compute the continuous measure metric
• continuous_diffusion_factor_gf_number_samples -- integer (default: 10); number of vectors used to approximate gf_2
• is_continuous_avalanche_factor -- boolean (default: True); flag indicating whether the continuous_avalanche_factor is computed
• is_continuous_neutrality_measure -- boolean (default: True); flag indicating whether the continuous_neutrality_measure is computed
• is_diffusion_factor -- boolean (default: True); flag indicating whether the diffusion_factor is computed

OUTPUT:

• A python dictionary that contains the test results for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=1) # long time
sage: output = speck_cipher.continuous_diffusion_tests() # long time
sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
True
continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

INPUT:

• beta_number_of_samples -- integer; number of samples used to compute the continuous measure metric
• gf_number_samples -- integer; number of vectors used to approximate gf_2
• input_bit -- integer (default: None); input bit position to be analyzed
• output_bits -- list (default: None); output bit positions to be analyzed

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
True
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

INPUT:

• number_of_samples -- integer (default: 5); used to compute the estimated probability of flipping
• avalanche_dependence_uniform_bias -- float (default: 0.05); define the range where the probability of flipping should be
• avalanche_dependence_criterion_threshold -- float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• avalanche_dependence_uniform_criterion_threshold -- float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• avalanche_weight_criterion_threshold -- float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
• avalanche_entropy_criterion_threshold -- float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
• run_avalanche_dependence -- boolean (default: True); if True, add the avalanche dependence results to the output dictionary
• run_avalanche_dependence_uniform -- boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
• run_avalanche_weight -- boolean (default: True); if True, add the avalanche weight results to the output dictionary
• run_avalanche_entropy -- boolean (default: True); if True, add the avalanche entropy results to the output dictionary

Note: the difference is inserted in d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100)
sage: d["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
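A hedged sketch of a reduced run that keeps only the avalanche weight criterion, using the run_* flags documented above to cut the runtime; the access path assumes the same nesting as the doctest:

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
d = speck.diffusion_tests(number_of_samples=50,
                          run_avalanche_dependence=False,
                          run_avalanche_dependence_uniform=False,
                          run_avalanche_weight=True,
                          run_avalanche_entropy=False)
# Only the avalanche weight results should be present in the output dictionary.
weights = d["test_results"]["key"]["round_output"]["avalanche_weight_vectors"]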
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

INPUT:

• cipher_input -- list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
-• intermediate_outputs -- boolean (default: False)
+• intermediate_output -- boolean (default: False)
• verbosity -- boolean (default: False); set this flag to True in order to print the input/output of each component
+• evaluate_api -- boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -731,28 +444,6 @@ 
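The doctest above is cut off by this excerpt. A hedged, self-contained sketch of the evaluate_api behaviour described above; the plaintext and key values are illustrative:

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
plaintext, key = 0x6574694C, 0x1918111009080100
# With evaluate_api=True the vectorized evaluator takes the same integer inputs as
# evaluate(), so the two calls are expected to agree (per the equality stated above).
assert speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)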


    property file_name
find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

Return good neural distinguisher input differences for a cipher.

INPUT:

• difference_positions -- table of booleans; one for each input to the cipher. True in positions where differences are allowed
• initial_population -- integer (default: 32); parameter of the evolutionary algorithm
• number_of_generations -- integer (default: 50); number of iterations of the evolutionary algorithm
• nb_samples -- integer (default: 10); number of samples for testing each input difference
• previous_generation -- (default: None); optional: initial table of differences to try
• verbose -- boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
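The doctest above calls the function in free-function style with the cipher as first argument; since the method is listed on the cipher class here, an equivalent method-style call would presumably be the following (a hedged sketch, not taken from the library's doctests):

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher()
# [True, False] allows differences in the plaintext but not in the key.
diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher(
    [True, False], number_of_generations=5, verbose=False)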
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
generate_csv_report(nb_samples, output_absolute_path)

Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

INPUT:

• nb_samples -- integer; number of samples
• output_absolute_path -- string; absolute path of the output file

EXAMPLES:

sage: import inspect
sage: import claasp
sage: import os.path
sage: tii_path = inspect.getfile(claasp)
sage: tii_dir_path = os.path.dirname(tii_path)
sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
sage: identity = IdentityBlockCipher()
sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
True
sage: import os
sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
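A hedged sketch of reading the generated report back with the standard csv module; the output path is hypothetical and the column layout is not specified by the text above:

import csv
from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher

identity = IdentityBlockCipher()
report_path = "/tmp/identity_report.csv"  # hypothetical location
identity.generate_csv_report(10, report_path)
with open(report_path, newline="") as f:
    rows = list(csv.reader(f))
print(rows[0])  # header row with the reported criteria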
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

INPUT:

• avalanche_results -- dictionary; results of the avalanche tests
• difference_positions -- list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.
• criterion_names -- list (default: None); names of the criteria to observe. The default value is equivalent to picking all of the 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors".

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: h[:20]
'\documentclass[12pt]'

sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
sage: ascon = AsconPermutation(number_of_rounds=4)
sage: d = ascon.diffusion_tests(number_of_samples=100) # long
sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
sage: cipher = XoodooPermutation(number_of_rounds=4)
sage: d = cipher.diffusion_tests(number_of_samples=100) # long
sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
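Since the method returns a complete LaTeX document as a string, a hedged sketch for writing it to a file so it can be compiled, e.g. with pdflatex (the file name is illustrative):

from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
d = sp.diffusion_tests(number_of_samples=100)
latex_source = sp.generate_heatmap_graphs_for_avalanche_tests(d)
# Compile afterwards with: pdflatex avalanche_heatmaps.tex
with open("avalanche_heatmaps.tex", "w") as f:
    f.write(latex_source)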
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1337,24 +918,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1494,38 +1057,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1602,70 +1133,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/permutations/chacha_permutation.html b/docs/build/html/ciphers/permutations/chacha_permutation.html
index 189ce744..cac8c627 100644
--- a/docs/build/html/ciphers/permutations/chacha_permutation.html
+++ b/docs/build/html/ciphers/permutations/chacha_permutation.html

-Chacha permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Chacha permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Chacha permutation

-class ChachaPermutation(number_of_rounds=0, state_of_components=None, cipher_family='chacha_permutation', cipher_type='permutation', inputs=None, cipher_inputs_bit_size=None, rotations=[8, 7, 16, 12], word_size=32, start_round='odd')
+class ChachaPermutation(number_of_rounds=0, state_of_components=None, cipher_family='chacha_permutation', cipher_type='permutation', inputs=None, cipher_inputs_bit_size=None, rotations=[8, 7, 16, 12], word_size=32, start_round=('odd', 'top'))

-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the ChachaPermutation class.

    This class is used to store compact representations of a permutation, used to generate the corresponding cipher. Additionally, one can use this class to implement ChaCha toy ciphers, such as the one described in [DEY2023].

    @@ -75,7 +76,7 @@

    Navigation

  • cipher_inputs_bit_sizeinteger (default: None)

  • rotationslist of integer (default: [8, 7, 16, 12])

  • word_sizeinteger (default: 32)

  • -start_round -- string (default: odd)
  • +start_round -- tuple of strings (default: ('odd', 'top'))

  • EXAMPLES:

    sage: from claasp.ciphers.permutations.chacha_permutation import ChachaPermutation
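The EXAMPLES block is truncated here. A hedged construction sketch using the updated signature; start_round=('odd', 'top') is the new default shown above, and the round count is illustrative:

from claasp.ciphers.permutations.chacha_permutation import ChachaPermutation

# start_round now takes a tuple of strings instead of a single string.
chacha = ChachaPermutation(number_of_rounds=8, start_round=('odd', 'top'))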

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    bottom_half_quarter_round(a, b, c, d, state)
    @@ -360,185 +278,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -571,53 +328,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -662,7 +372,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -676,11 +386,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -741,28 +454,6 @@ 


    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

      • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

      • initial_population – integer (default: 32); parameter of the evolutionary algorithm

      • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

      • nb_samples – integer (default: 10000); number of samples for testing each input difference

      • previous_generation – (default: None); optional initial table of differences to try

      • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose=False, number_of_generations=5)
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -798,35 +489,6 @@


    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

      • nb_samples – integer; number of samples

      • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -845,50 +507,6 @@


    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or to a file.

    -

    INPUT:

      • avalanche_results – dictionary; results of the avalanche tests

      • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

      • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all of the 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1, 193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1154,43 +772,6 @@


    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

      • nb_samples – integer (default: 10000); how many samples the neural network is trained with

      • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

      • number_of_epochs – integer (default: 10); how long the neural network is trained for

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

      • nb_samples – integer (default: 10000); how many samples the neural network is trained with

      • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

      • number_of_epochs – integer (default: 10); how long the neural network is trained for

      • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    property number_of_rounds
    @@ -1347,24 +928,6 @@


    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

      • component_analysis_results – list; results of the component analysis method

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: fig = aes.print_component_analysis_as_radar_charts(result)
    sage: fig.show() # doctest: +SKIP
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1499,38 +1062,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):

      • Find an input difference for the inputs set to True in difference_positions using an optimizer

      • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

      • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

      • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

      • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

      • training_samples – integer (default: 10**7); number of samples used for training

      • testing_samples – integer (default: 10**6); number of samples used for testing

      • number_of_epochs – integer (default: 40); number of training epochs

      • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
    -
    set_file_name(file_name)
    @@ -1612,70 +1143,16 @@


    top_half_quarter_round(a, b, c, d, state)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) Gohr's ResNet ([Go2019]) of depth depth.

    -

    INPUT:

      • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

      • number_of_rounds – integer; number of rounds to analyze

      • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

      • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

      • training_samples – integer (default: 10**7); number of samples used for training

      • testing_samples – integer (default: 10**6); number of samples used for testing

      • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

      • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

      • starting_round – integer; number of rounds to analyze

      • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

      • training_samples – integer (default: 10**7); number of samples used for training

      • testing_samples – integer (default: 10**6); number of samples used for testing

      • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

      • verbose – boolean (default: False); verbosity
    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/permutations/gaston_permutation.html b/docs/build/html/ciphers/permutations/gaston_permutation.html
new file mode 100644
index 00000000..f663a624
--- /dev/null
+++ b/docs/build/html/ciphers/permutations/gaston_permutation.html
@@ -0,0 +1,1357 @@

    Gaston permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Gaston permutation

    +
    +
    +class GastonPermutation(number_of_rounds=12)
    +

    Bases: Cipher

    +

    Construct an instance of the Gaston Permutation class.

    +

    INPUT:

    +
    +
      +
    • number_of_roundsinteger (default: 12); number of rounds of the permutation

    • +
    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.permutations.gaston_permutation import GastonPermutation
    +sage: gaston = GastonPermutation(number_of_rounds=12)
    +sage: plaintext = 0x0
    +sage: ciphertext = 0x88B326096BEBC6356CA8FB64BC5CE6CAF1CE3840D819071354D70067438689B5F17FE863F958F32B
+sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: plaintext=0x1F4AD9906DA6A2544B84D7F83F2BDDFA468A0853578A00E36C05A0506DF7F66E4EFB22112453C964
    +sage: ciphertext=0x1BA89B5B5C4583B622135709AE53417D9847B975E9EC9F3DCE042DF2A402591D563EC68FC30307EA
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: plaintext=0xFFFFFFFFFFFFFFFF0123456789ABCDEFFEDCBA9876543210AAAAAAAAAAAAAAAA0101010101010101
    +sage: ciphertext=0x3117D51B14937067338F17F773C13F79DFB86E0868D252AB0D461D35EB863DE708BCE3E354C7231A
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: gaston.number_of_rounds
    +12
    +sage: gaston.component_from(0, 0).id
    +'rot_0_0'
    +
    +
    +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size (in bytes) of the input, and m is the number of samples.

    +

    The return is a list of m*n ndarrays (format transposed compared to the input format), +where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

    +

    This function determines automatically if a bit-based evaluation is required, +and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, +with m the number of inputs to evaluate)

    • +
    • intermediate_outputboolean (default: False)

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    • +
    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
    +
    +
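    As a hedged illustration of the evaluate_api flag described above (it relies only on the equality stated in this entry and reuses the integers built in the previous example; not run as a doctest):

    sage: speck.evaluate_vectorized([X0Lib, K0Lib], evaluate_api=True) == speck.evaluate([X0Lib, K0Lib])  # doctest: +SKIP
    True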
    + +
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

    From [SGLYTQH2017]: Finds impossible differentials or zero-correlation linear approximations (based on type) by fixing the input and output iteratively to all possible Hamming weight 1 values, and asking the solver to find a solution; if none is found, then the propagation is impossible. Return a list of impossible differentials or zero-correlation linear approximations if there are any; otherwise return an empty list. A usage sketch is given below, after the parameter list.

    INPUT:

    +
      +
    • typestring; {“differential”, “linear”}: the type of property to search for

    • +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
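    A hedged usage sketch (argument values are taken from the signature and the lists above; the returned list depends on the cipher and the solver, so no output is asserted and the calls are not run as doctests):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher  # doctest: +SKIP
    sage: speck = SpeckBlockCipher(number_of_rounds=6)  # doctest: +SKIP
    sage: impossible = speck.find_impossible_property(type="differential", technique="sat", solver="kissat")  # doctest: +SKIP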
    +
    +gaston_chi(state)
    +
    + +
    +
    +gaston_iota(state, rc)
    +
    + +
    +
    +gaston_rho_east(state)
    +
    + +
    +
    +gaston_rho_west(state)
    +
    + +
    +
    +gaston_round_function(state, rc)
    +
    + +
    +
    +gaston_theta(state)
    +
    + +
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to make the C code +print a dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the C code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • word_sizeinteger; the size of the word

    • +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    +
      +
    • component_id – string; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

    Returns a model for a given technique and problem.

    +

    INPUT:

    +
    +
      +
    • techniquestring ; sat, smt, milp or cp

    • +
    • problemstring ; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)

    • +
    +
    +
    + +
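    A hedged usage sketch (the technique and problem strings come from the lists above; the concrete model object returned is not asserted here and the calls are not run as doctests):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher  # doctest: +SKIP
    sage: speck = SpeckBlockCipher(number_of_rounds=5)  # doctest: +SKIP
    sage: sat_model = speck.get_model('sat', 'xor_differential')  # doctest: +SKIP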
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
    + +
    +
    +get_round_from_component_id(component_id)
    +

    Return the round according to the round of the component id given as input.

    +

    INPUT:

    +
      +
    • component_id – string; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

    Return a list of impossible differentials if there are any; otherwise return an empty list.

    INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    • scenariostring; the type of impossible differentials to search, single-key or related-key

    • +
    +
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

    Return True if the cipher is resistant against algebraic attack.

    +

    INPUT:

    +
      +
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • +
    +
    + +
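    A hedged usage sketch (the Groebner basis computation can be expensive and its outcome depends on the cipher, so the call is not run as a doctest and no return value is asserted):

    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher  # doctest: +SKIP
    sage: IdentityBlockCipher().is_algebraically_secure(timeout=60)  # doctest: +SKIP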
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

    Return True if the cipher is SPN.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    +
      +
    • rinteger; round index

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    +
      +
    • file_namestring; a python string representing a valid file name

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

    Print the python code that implement the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    +
      +
    • file_namestring; name of the output file

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    +
    +
    Possible cipher inputs are:
      +
    • plaintext

    • +
    • key

    • +
    • tweak

    • +
    • initialization vector

    • +
    • nonce

    • +
    • constant

    • +
    • etc.

    • +
    +
    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    +
      +
    • number_of_testsinteger (default: 5); number of tests to execute

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

    Test the cipher with a list of test vector inputs and a list of test vector outputs.

    +

    INPUT:

    +
      +
    • list_of_test_vectors_inputlist; list of input testing vectors

    • +
    • list_of_test_vectors_outputlist; list of the expected output of the corresponding input testing +vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    • +
    +

    OUTPUT:

    +
      +
    • test_result – output of the testing. True if cipher.evaluate(input) == output for every input test vector, and False otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    + +

    Return a list of zero-correlation linear approximations if there are any; otherwise return an empty list.

    INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
\ No newline at end of file

diff --git a/docs/build/html/ciphers/permutations/gaston_sbox_permutation.html b/docs/build/html/ciphers/permutations/gaston_sbox_permutation.html
new file mode 100644
index 00000000..4e442bfb
--- /dev/null
+++ b/docs/build/html/ciphers/permutations/gaston_sbox_permutation.html
@@ -0,0 +1,1358 @@

    Gaston sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    Gaston sbox permutation

    +
    +
    +class GastonSboxPermutation(number_of_rounds=12)
    +

    Bases: Cipher

    +

    Construct an instance of the Gaston Permutation class with Sbox component.

    +

    INPUT:

    +
    +
      +
    • number_of_roundsinteger (default: 12); number of rounds of the permutation

    • +
    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.permutations.gaston_sbox_permutation import GastonSboxPermutation
    +sage: gaston = GastonSboxPermutation(number_of_rounds=12)
    +
    +sage: plaintext = 0x00000000000000010000000000000001000000000000000100000000000000010000000000000001
    +sage: ciphertext = 0x202d7fa691663e77043cb03594656fcdf6747f2da9cd9200ec3380fde8ec84d565247e6763406084
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: plaintext = 0x0
    +sage: ciphertext = 0x88B326096BEBC6356CA8FB64BC5CE6CAF1CE3840D819071354D70067438689B5F17FE863F958F32B
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: plaintext=0x1F4AD9906DA6A2544B84D7F83F2BDDFA468A0853578A00E36C05A0506DF7F66E4EFB22112453C964
    +sage: ciphertext=0x1BA89B5B5C4583B622135709AE53417D9847B975E9EC9F3DCE042DF2A402591D563EC68FC30307EA
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +sage: plaintext=0xFFFFFFFFFFFFFFFF0123456789ABCDEFFEDCBA9876543210AAAAAAAAAAAAAAAA0101010101010101
    +sage: ciphertext=0x3117D51B14937067338F17F773C13F79DFB86E0868D252AB0D461D35EB863DE708BCE3E354C7231A
    +sage: print(gaston.evaluate([plaintext])==ciphertext)
    +True
    +
    +
    +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, +and cipher_inputs[1] the second. +Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size +(in bytes) of the input, and m is the number of samples.

    +

    The return is a list of m*n ndarrays (format transposed compared to the input format), +where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

    +

    This function determines automatically if a bit-based evaluation is required, +and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, +with m the number of inputs to evaluate)

    • +
    • intermediate_outputboolean (default: False)

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    • +
    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) +is True. +EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
    +
    +
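The evaluate_api flag described above can also be exercised directly with integer inputs; the following is a hedged sketch (marked # doctest: +SKIP since it is not part of the original doctest suite) built only from the stated contract cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True), reusing the test vector from test_vector_check() below:

+
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
+sage: cipher = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
+sage: x = [0x6574694c, 0x1918111009080100]
+sage: cipher.evaluate_vectorized(x, evaluate_api=True) == cipher.evaluate(x)  # doctest: +SKIP
+True
+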
    + +
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

From [SGLYTQH2017]: Finds impossible differentials or zero-correlation linear approximations (based on type) +by iteratively fixing the input and output to all possible Hamming weight 1 values, and asking the solver +to find a solution; if none is found, then the propagation is impossible. +Return a list of impossible differentials or zero-correlation linear approximations if there are any; otherwise return an empty list. +INPUT:

    +
      +
    • typestring; {“differential”, “linear”}: the type of property to search for

    • +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
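No doctest accompanies this method; below is only a hedged usage sketch (marked # doctest: +SKIP, since the result depends on the cipher, the number of rounds and the availability of the chosen solver), built from the signature and the INPUT description above:

+
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: speck = SpeckBlockCipher(number_of_rounds=5)
+sage: impossible = speck.find_impossible_property(type="differential", technique="sat", solver="kissat")  # doctest: +SKIP
+sage: isinstance(impossible, list)  # doctest: +SKIP
+True
+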
    + +
    +
    +gaston_chi_sbox(state)
    +
    + +
    +
    +gaston_iota(state, rc)
    +
    + +
    +
    +gaston_rho_east(state)
    +
    + +
    +
    +gaston_rho_west(state)
    +
    + +
    +
    +gaston_round_function(state, rc)
    +
    + +
    +
    +gaston_theta(state)
    +
    + +
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to make the C code +print a dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the C code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • word_sizeinteger; the size of the word

    • +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    +
      +
• component_id – string; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

    Returns a model for a given technique and problem.

    +

    INPUT:

    +
    +
      +
    • techniquestring ; sat, smt, milp or cp

    • +
    • problemstring ; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)

    • +
    +
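A hedged usage sketch for get_model (marked # doctest: +SKIP; the concrete model object returned for each technique/problem pair is not specified above and is an assumption here):

+
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: speck = SpeckBlockCipher(number_of_rounds=5)
+sage: model = speck.get_model('sat', 'xor_differential')  # doctest: +SKIP
+sage: model is not None  # doctest: +SKIP
+True
+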
    +
    + +
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
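No description or doctest is given for get_partial_cipher; the following is only a hedged sketch (marked # doctest: +SKIP), assuming by analogy with cipher_partial_inverse above that it returns the non-inverted sub-cipher covering rounds start_round to end_round:

+
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
+sage: speck = SpeckBlockCipher(number_of_rounds=3)
+sage: partial = speck.get_partial_cipher(start_round=1, end_round=2, keep_key_schedule=True)  # doctest: +SKIP
+sage: partial.number_of_rounds <= speck.number_of_rounds  # doctest: +SKIP
+True
+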
    + +
    +
    +get_round_from_component_id(component_id)
    +

    Return the round according to the round of the component id given as input.

    +

    INPUT:

    +
      +
• component_id – string; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

Return a list of impossible differentials if there are any; otherwise return an empty list. +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    • scenariostring; the type of impossible differentials to search, single-key or related-key

    • +
    +
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

Return True if the cipher is resistant against algebraic attacks.

    +

    INPUT:

    +
      +
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • +
    +
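A hedged usage sketch (marked # doctest: +SKIP, because the Grobner basis computation may be slow and the outcome depends on the cipher and the chosen timeout):

+
sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
+sage: result = IdentityBlockCipher().is_algebraically_secure(30)  # doctest: +SKIP
+sage: result in (True, False)  # doctest: +SKIP
+True
+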
    + +
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

Return True if the cipher is SPN, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    +
      +
    • rinteger; round index

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    +
      +
    • file_namestring; a python string representing a valid file name

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

Print the Python code that implements the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    +
      +
    • file_namestring; name of the output file

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    +
    +
    Possible cipher inputs are:
      +
    • plaintext

    • +
    • key

    • +
    • tweak

    • +
    • initialization vector

    • +
    • nonce

    • +
    • constant

    • +
    • etc.

    • +
    +
    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    +
      +
    • number_of_testsinteger (default: 5); number of tests to execute

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

Test the cipher with a list of test vector inputs and the corresponding list of expected test vector outputs.

    +

    INPUT:

    +
      +
    • list_of_test_vectors_inputlist; list of input testing vectors

    • +
    • list_of_test_vectors_outputlist; list of the expected output of the corresponding input testing +vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    • +
    +

    OUTPUT:

    +
      +
• test_result – output of the test: True if cipher.evaluate(input) == output for all the input

• +
+

test vectors, and False otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    + +

Return a list of zero-correlation linear approximations if there are any; otherwise return an empty list. +INPUT:

    +
      +
    • techniquestring; {“sat”, “smt”, “milp”, “cp”}: the technique to use for the search

    • +
    • solverstring; the name of the solver to use for the search

    • +
    +
    + +
    + +
    + + +
    +
    +
    +
    + +
    +
    + + + + + + \ No newline at end of file diff --git a/docs/build/html/ciphers/permutations/gift_permutation.html b/docs/build/html/ciphers/permutations/gift_permutation.html index 9543ab27..fb39e8a4 100644 --- a/docs/build/html/ciphers/permutations/gift_permutation.html +++ b/docs/build/html/ciphers/permutations/gift_permutation.html @@ -1,23 +1,24 @@ - + - Gift permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Gift permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Gift permutation

    +

    Gift permutation

    class GiftPermutation(number_of_rounds=40)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the GIFTPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -224,94 +225,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -350,185 +268,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -561,53 +318,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -652,7 +362,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -666,11 +376,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -731,28 +444,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -788,35 +479,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -835,50 +497,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1149,43 +767,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1342,24 +923,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1499,38 +1062,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1607,70 +1138,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1695,13 +1172,13 @@

    Navigation

    This Page

    @@ -1719,7 +1196,7 @@

    Quick search

    - +
    @@ -1734,10 +1211,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1745,7 +1222,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/permutations/gift_sbox_permutation.html b/docs/build/html/ciphers/permutations/gift_sbox_permutation.html index fe3abf2f..86f49860 100644 --- a/docs/build/html/ciphers/permutations/gift_sbox_permutation.html +++ b/docs/build/html/ciphers/permutations/gift_sbox_permutation.html @@ -1,23 +1,24 @@ - + - Gift sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Gift sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Gift sbox permutation

    class GiftSboxPermutation(number_of_rounds=40)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the GiftSboxPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.
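
    For orientation, a minimal instantiation sketch (editor's illustration; the import path is assumed from the file path of this page and is not part of the original docstring):

    sage: from claasp.ciphers.permutations.gift_sbox_permutation import GiftSboxPermutation
    sage: gift = GiftSboxPermutation(number_of_rounds=40)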

    INPUT:

    @@ -224,94 +225,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeout – integer; the timeout for the Gröbner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
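As a follow-up illustration (editor's sketch, not part of the removed docstring), the dictionary returned in the example above can be inspected directly; the key names follow the structure shown in the comparison:

    sage: d['test_results']['test_passed']
    [False, False]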
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configuration – python dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samples – integer; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
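A small follow-up sketch (editor's illustration, assuming the apvs object from the example above; per the note, each vector has one entry per bit of the round output, here the 16-bit block):

    sage: len(apvs["key"]["round_output"][31][0]) == 16
    True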
    cipher_inverse()
    @@ -350,185 +268,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference,
    for a given round. If the worst avalanche dependence for a certain round is close to the output bit size
    with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability
    \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right],
    with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a
    certain round is close to the output bit size with respect to a certain threshold,
    we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit
    difference, for a given round. If the avalanche weights of all the input bit differences for a certain round
    are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies
    the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input
    bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a
    certain round is close to the output bit size with respect to a certain threshold, we say that the cipher
    satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()
    • avalanche_dependence_uniform_bias – float; define the range where the probability of flipping should be
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
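To make the probability window in the definition above concrete, a tiny numeric sketch (illustrative values chosen by the editor, mirroring the bias of 0.2 used in the example):

    sage: bias = 0.2                     # as passed to compute_criterion_from_avalanche_probability_vectors above
    sage: p_flip = 0.45                  # hypothetical flip probability of one output bit
    sage: 0.5 - bias <= p_flip <= 0.5 + bias
    True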
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -561,53 +318,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping
    • avalanche_dependence_uniform_bias – float (default: 0.05); define the range where the probability of flipping should be
    • avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
    • avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary
    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary
    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary
    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
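As an editor's sketch of how the returned dictionary is organised (key names taken only from the access patterns shown above, where both "key" and "plaintext" are used as inputs):

    sage: sorted(d["test_results"].keys())
    ['key', 'plaintext']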
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -652,7 +362,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input, and cipher_input[1] the second.
    @@ -666,11 +376,14 @@

    Navigation

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
-   • intermediate_outputs – boolean (default: False)
+   • intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component
+   • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function)
+     and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
+     is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
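A short sketch of the evaluate_api equivalence stated above, using the classic Speck32/64 test vector (the concrete values are the editor's illustration and are not part of the original docstring):

    sage: plaintext, key = 0x6574694c, 0x1918111009080100
    sage: speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)
    True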
    @@ -731,28 +444,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed
    • initial_population – integer (default: 32); parameter of the evolutionary algorithm
    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm
    • nb_samples – integer (default: 10000); number of samples for testing each input difference
    • previous_generation – (default: None); optional initial table of differences to try
    • verbose – boolean (default: False); verbosity
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
-sage: diff, scores, highest_round = cipher.find_good_input_difference_for_neural_distinguisher([True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
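For clarity (editor's note, following the parameter description above), the difference_positions argument in the example reads as:

    sage: difference_positions = [True, False]   # allow a difference in the plaintext input, none in the key (single-key setting)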
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -788,35 +479,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -835,50 +497,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests
    • difference_positions – list (default: None); positions of the differences to inject.
      The default value is equivalent to picking one of the worst positions for a difference and the average value.
    • criterion_names – list (default: None); names of the criteria to observe.
      The default value is equivalent to picking all 4 criteria:
      "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors",
      "avalanche_entropy_vectors", "avalanche_weight_vectors"
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
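Since the returned string is plain LaTeX, it can be written to a file and compiled; a minimal sketch (the file name is the editor's choice):

    sage: with open("avalanche_heatmaps.tex", "w") as f:
    ....:     _ = f.write(h)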
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1149,43 +767,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1342,24 +923,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1499,38 +1062,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans; default: True in the plaintext position, False in the other positions.
      If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position.
      The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.
    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 40); number of training epochs
    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES::

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: cipher.run_autond_pipeline()

    -
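A hedged sketch of a reduced-budget run, built only from the parameters documented above (the concrete values are illustrative and chosen by the editor):

    sage: cipher.run_autond_pipeline(difference_positions=[True, False],
    ....:                            optimizer_generations=5, training_samples=10**5,
    ....:                            testing_samples=10**4, number_of_epochs=1)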
    -
    set_file_name(file_name)
    @@ -1607,70 +1138,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly
    modified (AMSGrad instead of cyclic learning rate schedule) Gohr's RESNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher
    • number_of_rounds – integer; number of rounds to analyze
    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper
    • word_size – integer; the word size of the cipher, determines the shape of the neural network.
      Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES::

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: number_of_rounds = 5
-sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
-2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
-Validation accuracy at 5 rounds :0.9101160168647766
-0.9101160168647766

    -
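A shorter variant of the call above, restricted via the documented parameters (illustrative values chosen by the editor; output omitted):

    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds,
    ....:     word_size=16, depth=1, training_samples=10**5, testing_samples=10**4, number_of_epochs=1)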
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds,
      and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a
      label vector. To reproduce classical neural distinguisher results, one would use the example below.
    • starting_round – integer; number of rounds to analyze
    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom
      one or one returned by the get_neural_network function of neural_network_tests
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and
      retrains the model as long as the accuracy is statistically significant.
    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES::

-sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
-sage: cipher = SpeckBlockCipher()
-sage: input_differences = [0x400000, 0]
-sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
-sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
-sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
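As a sanity check of the data_generator contract described above, one could plug in a purely random generator (an editor's sketch, not a CLAASP helper); a correct pipeline should then report accuracies close to 0.5:

    sage: import numpy as np
    sage: def random_data_generator(nr, samples):
    ....:     X = np.random.randint(0, 2, size=(samples, 64), dtype=np.uint8)   # one row per sample
    ....:     Y = np.random.randint(0, 2, size=samples, dtype=np.uint8)         # binary labels
    ....:     return X, Y
    sage: cipher.train_neural_distinguisher(random_data_generator, starting_round=5, neural_network=neural_network)  # doctest: +SKIP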

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1695,13 +1172,13 @@

    Navigation

    This Page

    @@ -1719,7 +1196,7 @@

    Quick search

    - +
    @@ -1734,10 +1211,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1745,7 +1222,7 @@

    Navigation

diff --git a/docs/build/html/ciphers/permutations/gimli_permutation.html b/docs/build/html/ciphers/permutations/gimli_permutation.html
index 34e8a151..7aa4fe9f 100644
--- a/docs/build/html/ciphers/permutations/gimli_permutation.html
+++ b/docs/build/html/ciphers/permutations/gimli_permutation.html
@@ -1,23 +1,24 @@
-Gimli permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Gimli permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Gimli permutation

    class GimliPermutation(number_of_rounds=24, word_size=32)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the GimliPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    [Identical documentation removals as shown above for gift_sbox_permutation.html: algebraic_tests, analyze_cipher, as_python_dictionary and avalanche_probability_vectors.]
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    [Identical changes as shown above for gift_sbox_permutation.html: removal of the component_analysis_tests, compute_criterion_from_avalanche_probability_vectors, continuous_avalanche_factor, continuous_diffusion_factor, continuous_diffusion_tests, continuous_neutrality_measure_for_bit_j and diffusion_tests documentation; addition of create_networx_graph_from_input_ids() and create_top_and_bottom_subgraphs_from_components_graph().]
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -667,11 +377,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1145,43 +763,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1338,24 +919,6 @@

    Navigation

-print_component_analysis_as_radar_charts(component_analysis_results)

Return a matplotlib object containing the radar charts of the component analysis test.

INPUT:

• component_analysis_results – list; results of the component analysis method

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1500,38 +1063,6 @@

    Navigation

    property rounds_as_list
-run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

Runs the AutoND pipeline ([BGHR2023]):

• Find an input difference for the inputs set to True in difference_positions using an optimizer

• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

INPUT:

• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer looks for input differences in the positions set to True; by default, the single-key case is run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
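The doctest above runs the whole pipeline with its defaults. As a complement, here is a minimal, hedged usage sketch. It assumes a working CLAASP/Sage environment and it assumes, based on the analogous find_good_input_difference_for_neural_distinguisher example, that difference_positions takes one boolean per cipher input (here plaintext and key). The reduced parameter values are chosen only to keep the run short and are not recommendations.

# Illustrative sketch only, not a doctest from the library.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
# Single-key setting: allow input differences in the plaintext only
# (assumption: one boolean per cipher input, ordered [plaintext, key]).
cipher.run_autond_pipeline(difference_positions=[True, False],
                           optimizer_generations=5,
                           training_samples=10**5,
                           testing_samples=10**4,
                           number_of_epochs=1,
                           verbose=True)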
    set_file_name(file_name)
    @@ -1613,70 +1144,16 @@

    Navigation

-train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth-depth version of Gohr's ResNet ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, which determines the shape of the neural network; defaults to output_bit_size when unspecified (this may reduce the accuracy of the obtained distinguisher)

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766
-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, it retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
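To make the data_generator contract explicit, the following hedged sketch (assuming a CLAASP/Sage environment) wraps the library's get_differential_dataset in a named function and inspects the shapes of the returned X matrix and Y label vector; the printed shapes are illustrative, not guaranteed values.

# Illustrative sketch only; mirrors the doctest above with a named generator.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network

cipher = SpeckBlockCipher()
input_differences = [0x400000, 0]

def data_generator(nr, samples):
    # Must return X (one row per sample) and Y (the label vector), as described above.
    return get_differential_dataset(cipher, input_differences,
                                    number_of_rounds=nr, samples=samples)

X, Y = data_generator(5, 1000)
print(X.shape, Y.shape)   # one row of X per label in Y

neural_network = get_neural_network('gohr_resnet', input_size=64)
cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)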
    property type
diff --git a/docs/build/html/ciphers/permutations/gimli_sbox_permutation.html b/docs/build/html/ciphers/permutations/gimli_sbox_permutation.html
index cd694fb7..8b8e651c 100644
--- a/docs/build/html/ciphers/permutations/gimli_sbox_permutation.html
+++ b/docs/build/html/ciphers/permutations/gimli_sbox_permutation.html
@@ -1,23 +1,24 @@
-Gimli sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Gimli sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

Gimli sbox permutation

    class GimliSboxPermutation(number_of_rounds=24, word_size=32)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the GimliPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    This version considers the application of 32 parallel 3-bit S-boxes to each column.

    @@ -226,94 +227,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
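Since each component of a returned vector is, per the description above, an estimated flip probability for one output bit, a quick sanity check is possible. The sketch below is illustrative only, assumes a CLAASP/Sage environment, and reuses the keys shown in the doctest.

# Illustrative sketch only, not a doctest from the library.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
apvs = speck.avalanche_probability_vectors(100)
vector = apvs["key"]["round_output"][31][0]  # difference injected in the key, as in the doctest
# Every entry is an estimated probability that the corresponding output bit flips.
print(len(vector), all(0.0 <= p <= 1.0 for p in vector))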
    cipher_inverse()
    @@ -352,185 +270,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
-compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

Return a python dictionary that contains the dictionaries corresponding to each criterion.

ALGORITHM:

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in the interval [1/2 - bias, 1/2 + bias], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

Note

d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

INPUT:

• all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

• avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

See also

avalanche_probability_vectors() for the returned vectors.

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
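To make the four criteria above concrete, the following hedged sketch recomputes them from a single avalanche probability vector, following the definitions given in the ALGORITHM section (dependence: bits that can flip; dependence uniform: bits flipping with probability in [1/2 - bias, 1/2 + bias]; weight: expected Hamming weight; entropy: per-bit binary entropy summed over the output). It is an illustrative recomputation under these stated assumptions, not the library's internal implementation.

# Illustrative recomputation of the criteria from one probability vector.
from math import log2
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
p = speck.avalanche_probability_vectors(100)["key"]["round_output"][0][0]

def bit_entropy(prob):
    # Binary entropy of a single output-bit flip probability.
    return 0.0 if prob in (0.0, 1.0) else -prob * log2(prob) - (1 - prob) * log2(1 - prob)

bias = 0.2
dependence = sum(1 for prob in p if prob > 0)                                  # bits that flip at all
dependence_uniform = sum(1 for prob in p if 0.5 - bias <= prob <= 0.5 + bias)  # near-uniform bits
weight = sum(p)                                                                # expected Hamming weight
entropy = sum(bit_entropy(prob) for prob in p)                                 # strict avalanche entropy
print(dependence, dependence_uniform, weight, entropy)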
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
• A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -563,53 +320,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -654,7 +364,7 @@

    Navigation

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

INPUT:

• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-• intermediate_outputs – boolean (default: False)
+• intermediate_output – boolean (default: False)

• verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True

EXAMPLES:

sage: import numpy as np
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
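The doctest above is truncated by the surrounding diff context. As a complement, here is a hedged sketch of the evaluate_api convention described in the INPUT list; the integer test values and the [plaintext, key] input order are assumptions made for illustration.

# Illustrative sketch only, based on the equality stated in the description above.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck

cipher = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
x = [0x6574694c, 0x1918111009080100]   # [plaintext, key] as integers (assumed input order)
# With evaluate_api=True the vectorized evaluator is expected to agree with evaluate():
print(cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True))
# Without evaluate_api, each input is instead an ndarray of uint8 with one byte per row
# and one column per sample to evaluate, as described in the INPUT list above.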
    @@ -733,28 +446,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -790,35 +481,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -837,50 +499,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors" and "avalanche_weight_vectors".

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
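Since the returned string starts with \documentclass, it is a self-contained LaTeX document. A natural follow-up (a hedged sketch, not part of the original documentation) is to write it to a .tex file and compile it outside of Sage; pdflatex is assumed to be available on the system.

# Illustrative sketch only: write the LaTeX string to disk for later compilation.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
d = sp.diffusion_tests(number_of_samples=100)
h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
with open("avalanche_heatmaps.tex", "w") as f:
    f.write(h)
# The file can then be compiled, e.g. with: pdflatex avalanche_heatmaps.tex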
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1146,43 +764,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1339,24 +920,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1501,38 +1064,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1614,70 +1145,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, it retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/permutations/grain_core_permutation.html b/docs/build/html/ciphers/permutations/grain_core_permutation.html
index 8766c388..0e8ba2ea 100644
--- a/docs/build/html/ciphers/permutations/grain_core_permutation.html
+++ b/docs/build/html/ciphers/permutations/grain_core_permutation.html
@@ -1,23 +1,24 @@
-Grain core permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Grain core permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

Grain core permutation

    class GrainCorePermutation(number_of_rounds=None)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the GrainCorePermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -226,94 +227,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -352,185 +270,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -563,53 +320,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -654,7 +364,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -668,11 +378,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -733,28 +446,6 @@ 

    Navigation

    property file_name
    find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose=False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper
    "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject.
      The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe.
      The default value is equivalent to picking all 4 criteria:
      "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
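    Since the method only returns LaTeX source, writing it to a file for later compilation is a one-liner; a minimal sketch (the file name heatmaps.tex is arbitrary and not part of the CLAASP API):

    sage: with open("heatmaps.tex", "w") as f:  # doctest: +SKIP
    ....:     _ = f.write(h)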
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)

    make_file_name()
    neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained (number of epochs)

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
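    The doctest above is commented out because a full run is expensive; a purely illustrative sketch with deliberately tiny (and statistically meaningless) parameters, using only arguments documented above, might look like:

    sage: cipher = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=3)  # doctest: +SKIP
    sage: results = cipher.neural_network_blackbox_distinguisher_tests(  # doctest: +SKIP
    ....:     nb_samples=10, hidden_layers=[8, 8], number_of_epochs=1)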
    neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained (number of epochs)

    • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
    print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the component analysis test.

    INPUT:

    • component_analysis_results – list; results of the component analysis method

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: fig = aes.print_component_analysis_as_radar_charts(result)
    sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)

    property rounds_as_list
    run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
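    For a quick smoke test one can shrink every budget; the values below are arbitrary illustrations (far too small to yield meaningful distinguishers) and only use parameters documented above:

    sage: cipher.run_autond_pipeline(optimizer_samples=10**3, optimizer_generations=5,  # doctest: +SKIP
    ....:     training_samples=10**4, testing_samples=10**3, number_of_epochs=1, verbose=False)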
    set_file_name(file_name)
    train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766
    train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

    • verbose – boolean (default: False); verbosity

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
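    If only a single round count is of interest, the pipeline flag documented above can be disabled; a hedged sketch reusing the objects from the example, with a reduced epoch count (not an official doctest):

    sage: cipher.train_neural_distinguisher(data_generator, starting_round=5,  # doctest: +SKIP
    ....:     neural_network=neural_network, epochs=1, pipeline=False)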
    property type
diff --git a/docs/build/html/ciphers/permutations/keccak_invertible_permutation.html b/docs/build/html/ciphers/permutations/keccak_invertible_permutation.html
index 5d5932f2..47fe373e 100644
--- a/docs/build/html/ciphers/permutations/keccak_invertible_permutation.html
+++ b/docs/build/html/ciphers/permutations/keccak_invertible_permutation.html
-Keccak invertible permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Keccak invertible permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    Keccak invertible permutation

    class KeccakInvertiblePermutation(number_of_rounds=24, word_size=64)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the KeccakInvertiblePermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher. https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
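    A minimal instantiation sketch, assuming the usual CLAASP module layout (claasp.ciphers.permutations.keccak_invertible_permutation) and the documented default parameters:

    sage: from claasp.ciphers.permutations.keccak_invertible_permutation import KeccakInvertiblePermutation  # doctest: +SKIP
    sage: keccak = KeccakInvertiblePermutation(number_of_rounds=24, word_size=64)  # doctest: +SKIP
    sage: keccak.number_of_rounds  # expected: 24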


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    chi_definition(b)
    @@ -362,185 +280,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
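    To connect the four criteria above with the thresholds used by diffusion_tests, here is a small, self-contained Python sketch (illustrative only, not CLAASP code) of the avalanche-weight check: the criterion holds when the measured weight lies within block_bit_size/2 plus or minus the bias:

    sage: def satisfies_avalanche_weight(weight, block_bit_size, bias=0.01):
    ....:     # criterion from the documentation: block_bit_size/2 - bias <= weight <= block_bit_size/2 + bias
    ....:     return block_bit_size / 2 - bias <= weight <= block_bit_size / 2 + bias
    sage: satisfies_avalanche_weight(8.005, 16, bias=0.01)
    True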
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    -convert_to_compound_xor_cipher()
    +convert_to_compound_xor_cipher()
    +create_networx_graph_from_input_ids()
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -573,53 +330,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -664,7 +374,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -678,11 +388,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -743,28 +456,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -800,35 +491,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -847,50 +509,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1166,43 +784,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1359,24 +940,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1521,38 +1084,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1639,70 +1170,16 @@

    Navigation

    theta_definition(state, word_size)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/permutations/keccak_permutation.html b/docs/build/html/ciphers/permutations/keccak_permutation.html
index 61927fb1..970fcdbd 100644
--- a/docs/build/html/ciphers/permutations/keccak_permutation.html
+++ b/docs/build/html/ciphers/permutations/keccak_permutation.html
-Keccak permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Keccak permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    Keccak permutation

    class KeccakPermutation(number_of_rounds=24, word_size=64)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the KeccakPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    chi_definition(b, states)
    @@ -371,185 +289,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

INPUT:

• continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

• threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

• continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

• continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

• continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

• continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

• is_continuous_avalanche_factor – boolean (default: True); flag indicating whether to compute the continuous_avalanche_factor

• is_continuous_neutrality_measure – boolean (default: True); flag indicating whether to compute the continuous_neutrality_measure

• is_diffusion_factor – boolean (default: True); flag indicating whether to compute the diffusion_factor

OUTPUT:

• A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
-sage: speck_cipher = speck(number_of_rounds=1) # long time
-sage: output = speck_cipher.continuous_diffusion_tests() # long time
-sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
-True

-continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

• gf_number_samples – integer; number of vectors used to approximate gf_2

• input_bit – integer (default: None); input bit position to be analyzed

• output_bits – list (default: None); output bit positions to be analyzed

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
-sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
-sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
-True

+convert_to_compound_xor_cipher()

-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)

+create_networx_graph_from_input_ids()

-convert_to_compound_xor_cipher()

+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -582,53 +339,6 @@

-diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

INPUT:

• number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

• avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

• avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

• avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

• run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

• run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

• run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

Note

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
-sage: d = speck.diffusion_tests(number_of_samples=100)
-sage: d["test_results"]["key"]["round_output"][ # random
-....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
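The following is a hedged illustration (an assumption added for clarity, not taken from the patch) of how the threshold criteria above reduce to simple interval checks for a 16-bit block and the default biases:

sage: block_bit_size = 16
sage: d_dependence = 16                                                  # hypothetical: every output bit flips
sage: block_bit_size - 0 <= d_dependence <= block_bit_size + 0          # dependence criterion, threshold 0
True
sage: d_weight = 8.005                                                   # hypothetical measured avalanche weight
sage: block_bit_size/2 - 0.01 <= d_weight <= block_bit_size/2 + 0.01    # weight criterion, threshold 0.01
True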
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -673,7 +383,7 @@


    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

@@ -687,11 +397,14 @@

• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-• intermediate_outputs – boolean (default: False)

+• intermediate_output – boolean (default: False)

• verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
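The example above is truncated by the diff context; the following is a hedged sketch (an assumption based on the parameter description above, not part of the patch) of the expected evaluate_api behaviour:

sage: pt, key = 0x12345678, 0x1234567890ABCDEF        # hypothetical inputs for Speck 32/64
sage: speck.evaluate([pt, key]) == speck.evaluate_vectorized([pt, key], evaluate_api=True)
True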
    @@ -752,28 +465,6 @@ 

property file_name

-find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

INPUT:

• difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

• initial_population – integer (default: 32); parameter of the evolutionary algorithm

• number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

• nb_samples – integer (default: 10); number of samples for testing each input difference

• previous_generation – (default: None); optional initial table of differences to try

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: cipher = SpeckBlockCipher()
-sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -809,35 +500,6 @@

-generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

INPUT:

• nb_samples – integer; number of samples

• output_absolute_path – string; absolute path of the output file

EXAMPLES:

sage: import inspect
-sage: import claasp
-sage: import os.path
-sage: tii_path = inspect.getfile(claasp)
-sage: tii_dir_path = os.path.dirname(tii_path)
-sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
-sage: identity = IdentityBlockCipher()
-sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
-sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
-True
-sage: import os
-sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -856,50 +518,6 @@

-generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

INPUT:

• avalanche_results – dictionary; results of the avalanche tests

• difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

• criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
-sage: d = sp.diffusion_tests(number_of_samples=100)
-sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
-sage: h[:20]
-'\documentclass[12pt]'

-sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
-sage: ascon = AsconPermutation(number_of_rounds=4)
-sage: d = ascon.diffusion_tests(number_of_samples=100) # long
-sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

-sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
-sage: cipher = XoodooPermutation(number_of_rounds=4)
-sage: d = cipher.diffusion_tests(number_of_samples=100) # long
-sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1175,43 +793,6 @@

make_file_name()

-neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); how long the training of the neural network lasts

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
-sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
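Since the doctest above is commented out, the following hedged sketch (an assumption; the small parameters are chosen only for illustration and are not from the patch) shows what an actual call would look like:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: results = speck(number_of_rounds=3).neural_network_blackbox_distinguisher_tests(
....:     nb_samples=1000, hidden_layers=[32, 32], number_of_epochs=1)   # random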
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); how long the training of the neural network lasts

• diff – list (default: [0x01]); list of input differences

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
-sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
    @@ -1368,24 +949,6 @@

-print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the components analysis test

INPUT:

• component_analysis_results – list; results of the component analysis method

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
-sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
-sage: result = aes.component_analysis_tests()
-sage: fig = aes.print_component_analysis_as_radar_charts(result)
-sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1530,38 +1093,6 @@

property rounds_as_list

-run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    Runs the AutoND pipeline ([BGHR2023]):
• Find an input difference for the inputs set to True in difference_positions using an optimizer

• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

INPUT:

• difference_positions – list of booleans; default: True in the plaintext position, False in the other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
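A hedged sketch (an assumption, not from the patch) of restricting the optimizer to plaintext differences only via difference_positions, for a cipher whose inputs are [plaintext, key]:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline(difference_positions=[True, False],    # differences allowed in the plaintext only
....:                            optimizer_samples=1000, number_of_epochs=1)  # small values, illustration only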

    set_file_name(file_name)
    @@ -1648,70 +1179,16 @@

theta_definition(states)

-train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type
diff --git a/docs/build/html/ciphers/permutations/keccak_sbox_permutation.html b/docs/build/html/ciphers/permutations/keccak_sbox_permutation.html
index e0ebf366..80dc628a 100644
--- a/docs/build/html/ciphers/permutations/keccak_sbox_permutation.html
+++ b/docs/build/html/ciphers/permutations/keccak_sbox_permutation.html
@@ -1,23 +1,24 @@
-Keccak sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Keccak sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Keccak sbox permutation

    class KeccakSboxPermutation(number_of_rounds=24, word_size=64)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

Construct an instance of the KeccakSboxPermutation class.

This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -230,94 +231,11 @@

add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)

-algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

INPUT:

• timeout – integer; the timeout for the Grobner basis computation in seconds

OUTPUT: a dictionary with the following keys:

• npolynomials – number of polynomials

• nvariables – number of variables

• timeout – timeout in seconds

• pass – whether the algebraic test passes w.r.t. the given timeout

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
-sage: d = speck.algebraic_tests(5)  # long time
-sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
-....: {'number_of_variables': [304, 800],
-....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
-....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
-True
    -analyze_cipher(tests_configuration)
Generate a dictionary with the analysis of the cipher.

The analysis is related to the following tests:

• Diffusion Tests

INPUT:

• tests_configuration – python dictionary

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
-sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
-sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
-....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
-....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
-....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
-....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
-....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
-sage: analysis = sp.analyze_cipher(tests_configuration)
-sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
-....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
-avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

The inputs considered are plaintext, key, etc.

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

Note

apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

INPUT:

• nb_samples – integer; used to compute the estimated probability of flipping

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
-sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
-sage: apvs = speck.avalanche_probability_vectors(100)
-sage: apvs["key"]["round_output"][31][0] # random
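As a hedged illustration of the Note above (an assumption added for clarity, not taken from the patch), each entry of the dictionary is a vector of estimated flip probabilities:

sage: v = apvs["key"]["round_output"][31][0]   # difference injected in key bit 31, first occurrence
sage: all(0 <= p <= 1 for p in v)              # one estimated flip probability per bit of the round output
True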
    chi_definition(b)
    @@ -361,185 +279,24 @@

-component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

INPUT:

• None

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
-sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
-sage: result = aes.component_analysis_tests()
-sage: len(result)
-9

component_from(round_number, index)

-compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

ALGORITHM:

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

Note

d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

INPUT:

• all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

• avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

See also

avalanche_probability_vectors() for the returning vectors.

EXAMPLES:
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
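A hedged sketch (an assumption, not taken from the patch or the CLAASP sources) of how the entropy notion described in ALGORITHM can be computed from a vector of flip probabilities, using the binary entropy function H(p) = -p*log2(p) - (1-p)*log2(1-p):

sage: from math import log2
sage: def binary_entropy(p):
....:     return 0.0 if p in (0.0, 1.0) else -p*log2(p) - (1 - p)*log2(1 - p)
sage: apv = [0.5, 0.48, 0.02]                  # hypothetical flip probabilities for three output bits
sage: [round(binary_entropy(p), 3) for p in apv]
[1.0, 0.999, 0.141]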
diff --git a/docs/build/html/ciphers/permutations/photon_permutation.html b/docs/build/html/ciphers/permutations/photon_permutation.html
index a5e0c869..dbf092df 100644
--- a/docs/build/html/ciphers/permutations/photon_permutation.html
+++ b/docs/build/html/ciphers/permutations/photon_permutation.html
@@ -1,22 +1,23 @@
-Photon permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Photon permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Photon permutation

    class PhotonPermutation(t=256, number_of_rounds=12)
-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the PhotonPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -224,94 +225,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
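Editorial note (not part of the generated docstring): a minimal sketch of how the indexing described in the note above is used in practice; the probability values depend on the random samples, so only the access pattern is illustrated.
sage: apvs = speck.avalanche_probability_vectors(100)
sage: v = apvs["key"]["round_output"][31][0]    # difference injected in key bit 31, first occurrence of round_output
sage: [round(p, 2) for p in v]                  # one flip probability per round_output bit  # random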
    cipher_inverse()
    @@ -350,185 +268,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability $p \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    -

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
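Editorial note (illustrative only, not the claasp implementation): a plain-Python sketch of how the four criteria described above can be derived from a single avalanche probability vector; the probabilities and the bias value are made-up numbers.
from math import log2

p = [0.0, 0.47, 0.52, 1.0]   # illustrative flip probabilities, one per output bit
bias = 0.2                   # avalanche_dependence_uniform_bias

dependence = sum(1 for x in p if x > 0)                         # output bits that flip at all
dependence_uniform = sum(1 for x in p if abs(x - 0.5) <= bias)  # bits flipping with probability close to 1/2
weight = sum(p)                                                 # expected Hamming weight of the output difference
entropy = sum(-x * log2(x) - (1 - x) * log2(1 - x) if 0 < x < 1 else 0 for x in p)  # per-bit flip uncertainty, summed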
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
• A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -561,53 +318,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
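Editorial worked example (illustrative numbers, not claasp output) of how the threshold parameters above are applied: for a 16-bit round output and avalanche_weight_criterion_threshold = 0.01, the avalanche weight criterion for a given input difference is satisfied only if the measured expected weight d lies in [16/2 - 0.01, 16/2 + 0.01] = [7.99, 8.01]; the dependence, dependence uniform and entropy criteria instead compare d against the full bit size, i.e. require 16 - bias <= d <= 16 + bias.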
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -652,7 +362,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second. @@ -666,11 +376,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.
EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
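Editorial sketch appended to the (truncated) example above: the evaluate_api equivalence stated in the parameter description, with an illustrative plaintext/key pair (the specific values are assumptions for demonstration, not claasp test vectors).
sage: plaintext, key = 0x6574694c, 0x1918111009080100
sage: speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)  # expected per the note above
True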
    @@ -731,28 +444,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -788,35 +479,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -835,50 +497,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria:
- “avalanche_dependence_vectors”
- “avalanche_dependence_uniform_vectors”
- “avalanche_entropy_vectors”
- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1144,43 +762,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1337,24 +918,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1494,38 +1057,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1602,70 +1133,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
- training_samples – integer (default: 10**7); number of samples used for training
- testing_samples – integer (default: 10**6); number of samples used for testing
- number_of_epochs – integer (default: 200); number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
- starting_round – integer; number of rounds to analyze
- neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
- training_samples – integer (default: 10**7); number of samples used for training
- testing_samples – integer (default: 10**6); number of samples used for testing
- pipeline – boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant
- verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1695,8 +1172,8 @@

    Previous topic

    This Page

    @@ -1714,7 +1191,7 @@

    Quick search

    - +
    @@ -1729,7 +1206,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + diff --git a/docs/build/html/ciphers/permutations/salsa_permutation.html b/docs/build/html/ciphers/permutations/salsa_permutation.html index 3dd7ba3e..3e37dc5c 100644 --- a/docs/build/html/ciphers/permutations/salsa_permutation.html +++ b/docs/build/html/ciphers/permutations/salsa_permutation.html @@ -1,23 +1,24 @@ - + - Salsa permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Salsa permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Salsa permutation

    +

    Salsa permutation

    -class SalsaPermutation(number_of_rounds=0, state_of_components=None, cipher_family='salsa_permutation', cipher_type='permutation', inputs=None, cipher_inputs_bit_size=None, rotations=[13, 18, 7, 9], word_size=32, start_round='odd')
    -

    Bases: claasp.cipher.Cipher

    +class SalsaPermutation(number_of_rounds=0, state_of_components=None, cipher_family='salsa_permutation', cipher_type='permutation', inputs=None, cipher_inputs_bit_size=None, rotations=[13, 18, 7, 9], word_size=32, start_round=('odd', 'top')) +

    Bases: Cipher

    Construct an instance of the SalsaPermutation class.

    This class is used to store compact representations of a permutation, used to generate the corresponding cipher.

    INPUT:

    @@ -74,7 +75,7 @@

    Navigation

  • cipher_inputs_bit_sizeinteger (default: None)

  • rotationslist of integer (default: [8, 7, 16, 12])

  • word_sizeinteger (default: 32)

  • -
  • start_roundstring (default: odd)

  • +
  • start_roundtuple of strings (default: (odd, top)

  • EXAMPLES:

    sage: from claasp.ciphers.permutations.salsa_permutation import SalsaPermutation
    @@ -228,94 +229,11 @@ 

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    bottom_half_quarter_round(a, b, c, d, state)
    @@ -359,185 +277,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability $p \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    -

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -570,53 +327,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -661,7 +371,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second. @@ -675,11 +385,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.
EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -740,28 +453,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -797,35 +488,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -844,50 +506,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1153,43 +771,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1346,24 +927,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1498,38 +1061,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1611,70 +1142,16 @@

    Navigation

    top_half_quarter_round(a, b, c, d, state)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly
modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's RESNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per
input to the cipher.

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network.
Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds,
and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector.
To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one
returned by the get_neural_network function of neural_network_tests.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains
the model as long as the accuracy is statistically significant.

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1699,13 +1176,13 @@

    Navigation

    This Page

    @@ -1723,7 +1200,7 @@

    Quick search

    - +
    @@ -1738,10 +1215,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1749,7 +1226,7 @@

    Navigation

diff --git a/docs/build/html/ciphers/permutations/sparkle_permutation.html b/docs/build/html/ciphers/permutations/sparkle_permutation.html
index 3ff0d4ff..0c8bd220 100644
--- a/docs/build/html/ciphers/permutations/sparkle_permutation.html
+++ b/docs/build/html/ciphers/permutations/sparkle_permutation.html
@@ -1,23 +1,24 @@
-Sparkle permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Sparkle permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Sparkle permutation

    +

    Sparkle permutation

    class SparklePermutation(number_of_blocks=4, number_of_steps=7)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the SparklePermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -223,36 +224,6 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    -
    alzette(state_x, state_y, ci)
    @@ -263,64 +234,11 @@

    Navigation

    alzette_round(state_x, state_y, rotate_x, rotate_y, ci)
    -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -359,185 +277,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round.
If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round.
If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round.
If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round.
If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
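As a reading aid, the acceptance regions described above can be summarised compactly (a hedged paraphrase of the thresholds quoted in diffusion_tests below, with n the output bit size (block_bit_size), t the threshold/bias and d the measured quantity):

avalanche dependence, dependence uniform, entropy:  n - t <= d <= n + t
avalanche weight:  n/2 - t <= d <= n/2 + t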
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
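Since every metric can be toggled independently, here is a hedged sketch (not taken from the library documentation) that computes only the continuous avalanche factor; the flag and parameter names follow the signature above, and the sample count is illustrative.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=1)
sage: output = speck_cipher.continuous_diffusion_tests(
....:     continuous_avalanche_factor_number_of_samples=50,
....:     is_continuous_neutrality_measure=False,
....:     is_diffusion_factor=False)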
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -570,53 +327,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
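A hedged sketch (not taken from the library documentation) of a call with non-default thresholds; the keyword names are those listed above and the values are purely illustrative.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100,
....:     avalanche_dependence_uniform_bias=0.2,
....:     avalanche_weight_criterion_threshold=0.1,
....:     run_avalanche_entropy=False)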
    -
    ell_function(state_i)
    @@ -666,7 +376,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.

@@ -680,11 +390,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -745,28 +458,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -802,35 +493,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper
“The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -849,50 +511,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria:
“avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
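Since the return value is a plain LaTeX string, it can simply be written to a .tex file and compiled; a hedged sketch continuing the first example above (the file name is arbitrary):

sage: with open("avalanche_heatmaps.tex", "w") as f:
....:     _ = f.write(h)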
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1163,43 +781,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1356,24 +937,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1513,38 +1076,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions – list of booleans; default: True in the plaintext position, False in the
other positions. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per
input position. The optimizer will look for input differences in the positions set to True; by default,
the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the
quality of the optimizer, at the cost of a longer runtime.

• optimizer_generations – integer (default: 50); number of generations used by the optimizer;
higher values increase the runtime.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1621,70 +1152,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly
modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's RESNet of depth depth ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per
input to the cipher.

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network.
Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds,
and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector.
To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one
returned by the get_neural_network function of neural_network_tests.

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains
the model as long as the accuracy is statistically significant.

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1709,13 +1186,13 @@

    Navigation

    This Page

    @@ -1733,7 +1210,7 @@

    Quick search

    - +
    @@ -1748,10 +1225,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1759,7 +1236,7 @@

    Navigation

diff --git a/docs/build/html/ciphers/permutations/spongent_pi_fsr_permutation.html b/docs/build/html/ciphers/permutations/spongent_pi_fsr_permutation.html
index 2ecf7743..f99e4044 100644
--- a/docs/build/html/ciphers/permutations/spongent_pi_fsr_permutation.html
+++ b/docs/build/html/ciphers/permutations/spongent_pi_fsr_permutation.html
@@ -1,23 +1,24 @@
-Spongent pi fsr permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Spongent pi fsr permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Spongent pi fsr permutation

    +

    Spongent pi fsr permutation

    class SpongentPiFSRPermutation(state_bit_size=160, number_of_rounds=80)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the SpongentPiFSRPermutation class with FSR component.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round.
If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round.
If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round.
If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round.
If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
• is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.

@@ -667,11 +377,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
is True.

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper
“The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all four criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
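    Since the returned string is a complete LaTeX document, a natural follow-up is to write it to a .tex file and compile it. The sketch below is illustrative and not part of the original documentation; the output file name is hypothetical.

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: latex_code = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: with open("speck_avalanche_heatmaps.tex", "w") as f:   # hypothetical output path
    ....:     _ = f.write(latex_code)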
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1150,43 +768,6 @@


    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
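    The doctest above is commented out, presumably because training is slow. As a hedged sketch of how an actual invocation would look (parameter values are deliberately small and purely illustrative):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=5)
    sage: results = cipher.neural_network_blackbox_distinguisher_tests(nb_samples=10)  # dictionary of per-round accuracies  # doctest: +SKIP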
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
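    Likewise for the differential variant, a hedged invocation sketch (the input difference value is illustrative and not taken from the patch):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=5)
    sage: results = cipher.neural_network_differential_distinguisher_tests(nb_samples=10, diff=[0x400000])  # doctest: +SKIP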
    property number_of_rounds
    @@ -1343,24 +924,6 @@


    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1500,38 +1063,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
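    A hedged variant of the call above with explicit keyword arguments, to illustrate the parameters listed under INPUT; the sample counts are deliberately tiny and purely illustrative.

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: # one boolean per cipher input (here plaintext and key), as in the default single-key setting
    sage: results = cipher.run_autond_pipeline(difference_positions=[True, False],
    ....:     optimizer_samples=10**3, optimizer_generations=5,
    ....:     training_samples=10**5, testing_samples=10**4,
    ....:     number_of_epochs=1, verbose=False)  # doctest: +SKIP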
    set_file_name(file_name)
    @@ -1608,70 +1139,16 @@


    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    diff --git a/docs/build/html/ciphers/permutations/spongent_pi_permutation.html b/docs/build/html/ciphers/permutations/spongent_pi_permutation.html
    index 6f2b1809..478f52a8 100644
    --- a/docs/build/html/ciphers/permutations/spongent_pi_permutation.html
    +++ b/docs/build/html/ciphers/permutations/spongent_pi_permutation.html
    - Spongent pi permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Spongent pi permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Spongent pi permutation

    class SpongentPiPermutation(state_bit_size=160, number_of_rounds=80)
    - Bases: claasp.cipher.Cipher
    + Bases: Cipher

    Construct an instance of the SpongentPiPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:
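    The constructor’s parameter list is elided by the hunk below. As a hedged sketch (the import path is inferred from the file name in this diff and is not confirmed by the patch), instantiation follows the usual CLAASP pattern:

    sage: from claasp.ciphers.permutations.spongent_pi_permutation import SpongentPiPermutation
    sage: spongent = SpongentPiPermutation(state_bit_size=160, number_of_rounds=80)   # defaults shown in the signature above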

    @@ -225,94 +226,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@


    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weight of every input bit difference for a certain round is close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of every input bit difference for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
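    As a hedged follow-up to the doctest above (not part of the patch, continuing the same session), a result can be read back along the dictionary path given in the Note; the indices are illustrative.

    sage: entry = d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][0]["output_vectors"][0]
    sage: entry["vector"]   # random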
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    @@ -667,11 +377,14 @@


    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    - intermediate_outputs – boolean (default: False)
    + intermediate_output – boolean (default: False)

    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

    + evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -732,28 +445,6 @@ 


    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional initial table of differences to try

    • verbose – boolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@


    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].

    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@


    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all four criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1150,43 +768,6 @@


    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); number of samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1343,24 +924,6 @@


    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1500,38 +1063,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1608,70 +1139,16 @@


    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

    • number_of_rounds – integer; number of rounds to analyze

    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    diff --git a/docs/build/html/ciphers/permutations/spongent_pi_precomputation_permutation.html b/docs/build/html/ciphers/permutations/spongent_pi_precomputation_permutation.html
    index 9b2f5d5f..a54cd46c 100644
    --- a/docs/build/html/ciphers/permutations/spongent_pi_precomputation_permutation.html
    +++ b/docs/build/html/ciphers/permutations/spongent_pi_precomputation_permutation.html
    - Spongent pi precomputation permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Spongent pi precomputation permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Spongent pi precomputation permutation

    class SpongentPiPrecomputationPermutation(state_bit_size=160, number_of_rounds=80)
    - Bases: claasp.cipher.Cipher
    + Bases: Cipher

    Construct an instance of the SpongentPiPrecomputationPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@


    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weight of every input bit difference for a certain round is close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of every input bit difference for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    INPUT:

    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping
    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be
    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence value d satisfies block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform value d satisfies block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight value d satisfies block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy value d satisfies block_bit_size - bias <= d <= block_bit_size + bias
    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary
    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary
    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

    diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: d = speck.diffusion_tests(number_of_samples=100)
    sage: d["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
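    As an illustrative follow-up (a sketch, not part of the library documentation), the nested path given in the Note above can be walked programmatically; the key names follow the Note, while the loop over rounds is an assumption about the shape of the returned dictionary:

        # Sketch: inspect the avalanche-entropy vectors for the first plaintext difference.
        entropy_results = d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]
        first_difference = entropy_results["differences"][0]
        for round_index, vector_entry in enumerate(first_difference["output_vectors"]):
            print(round_index, vector_entry)  # one entry per analyzed round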
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    INPUT:

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
    • intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs and returns integer outputs (as the evaluate function); it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
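    The original doctest is truncated at this point in the diff; the following sketch (assumptions: the integer-input behaviour of evaluate_api described above, and the standard Speck-32/64 test vector) shows one plausible way to compare the two evaluation paths:

        # Sketch: evaluate Speck-32/64 on a single (plaintext, key) pair via the integer API.
        from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        cipher = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
        plaintext, key = 0x6574694C, 0x1918111009080100
        out_direct = cipher.evaluate([plaintext, key])
        out_vectorized = cipher.evaluate_vectorized([plaintext, key], evaluate_api=True)
        print(out_direct == out_vectorized)  # expected to be True, per the equivalence stated above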
    property file_name
    find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed
    • initial_population – integer (default: 32); parameter of the evolutionary algorithm
    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm
    • nb_samples – integer (default: 10); number of samples for testing each input difference
    • previous_generation – (default: None); optional initial table of differences to try
    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples
    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests
    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.
    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
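    A short sketch (not from the original documentation) of saving the returned LaTeX string so it can be compiled separately; the file name is arbitrary:

        # Sketch: persist the generated LaTeX instructions to disk for later compilation with pdflatex.
        with open("speck_avalanche_heatmaps.tex", "w") as tex_file:
            tex_file.write(h)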
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    make_file_name()
    neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs – integer (default: 10); how long the training of the neural network runs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random

    neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs – integer (default: 10); how long the training of the neural network runs
    • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
    print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the component analysis test.

    INPUT:

    • component_analysis_results – list; results of the component analysis method

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: fig = aes.print_component_analysis_as_radar_charts(result)
    sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    property rounds_as_list
    run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 40); number of training epochs
    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
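    An illustrative variant (a sketch, assuming Speck's inputs are ordered as [plaintext, key], as in the find_good_input_difference_for_neural_distinguisher example above), restricting the optimizer to plaintext differences and using reduced sample counts for a quicker run:

        # Sketch: single-key AutoND run with reduced sample sizes.
        from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        cipher = SpeckBlockCipher()
        results = cipher.run_autond_pipeline(
            difference_positions=[True, False],  # allow differences only in the plaintext
            optimizer_samples=1000,
            training_samples=10**5,
            testing_samples=10**4,
            number_of_epochs=1,
        )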
    set_file_name(file_name)
    train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr RESNet of depth depth ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher
    • number_of_rounds – integer; number of rounds to analyze
    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper
    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (which may reduce the accuracy of the obtained distinguisher)
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 40); number of training epochs

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
    • starting_round – integer; number of rounds to analyze
    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant
    • verbose – boolean (default: False); verbosity

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
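    Since data_generator only has to follow the contract described above (take a number of rounds and a number of samples and return a sample matrix X and a label vector Y), a custom generator can be sketched as follows; the random data here is purely illustrative and would of course yield an indistinguishable dataset:

        import numpy as np

        def random_data_generator(nr, samples):
            # Illustrative generator honoring the documented contract: X has one row
            # per sample, Y holds binary labels ("real pair" vs "random pair").
            X = np.random.randint(0, 2, size=(samples, 64), dtype=np.uint8)
            Y = np.random.randint(0, 2, size=samples, dtype=np.uint8)
            return X, Y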
    property type
    diff --git a/docs/build/html/ciphers/permutations/tinyjambu_32bits_word_permutation.html b/docs/build/html/ciphers/permutations/tinyjambu_32bits_word_permutation.html
    index 29e5f03b..568d6c8d 100644
    --- a/docs/build/html/ciphers/permutations/tinyjambu_32bits_word_permutation.html
    +++ b/docs/build/html/ciphers/permutations/tinyjambu_32bits_word_permutation.html
    -Tinyjambu 32bits word permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +Tinyjambu 32bits word permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Tinyjambu 32bits word permutation
    class TinyJambuWordBasedPermutation(key_bit_size=128, number_of_rounds=640)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the TinyJambuWordBasedPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUTS: a dictionary with the following keys:

    • npolynomials – number of polynomials
    • nvariables – number of variables
    • timeout – timeout in seconds
    • pass – whether the algebraic test passes w.r.t. the given timeout

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    sage: d = speck.algebraic_tests(5)  # long time
    sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    ....: {'number_of_variables': [304, 800],
    ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    True
    analyze_cipher(tests_configuration)

    Generate a dictionary with the analysis of the cipher.

    The analysis is related to the following tests:

    • Diffusion Tests

    INPUT:

    • tests_configuration – python dictionary

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    ....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    ....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    ....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    ....: "avalanche_dependence_uniform_criterion_threshold": 0, "avalanche_weight_criterion_threshold": 0.1,
    ....: "avalanche_entropy_criterion_threshold": 0.1}, "component_analysis_tests": {"run_tests": True}}
    sage: analysis = sp.analyze_cipher(tests_configuration)
    sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
    avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

    The inputs considered are plaintext, key, etc.

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: apvs["key"]["round_output"][31][0] # random
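    As an illustration only (not CLAASP API), the flip probability that each component of these vectors estimates can be written as a tiny Monte Carlo computation; the toy function below stands in for one round of a cipher:

        import numpy as np

        def flip_probabilities(f, n_bits, input_diff, nb_samples=1000):
            # Estimate, for each output bit, the probability that it flips when
            # input_diff is XORed into a random input.
            rng = np.random.default_rng(0)
            xs = rng.integers(0, 2**n_bits, size=nb_samples, dtype=np.uint64)
            flips = np.zeros(n_bits)
            for x in xs:
                d = f(int(x)) ^ f(int(x) ^ input_diff)
                flips += [(d >> i) & 1 for i in range(n_bits)]
            return flips / nb_samples

        # Toy 8-bit "round": rotate left by 1 and XOR a constant.
        toy_round = lambda x: (((x << 1) | (x >> 7)) & 0xFF) ^ 0x5A
        print(flip_probabilities(toy_round, 8, 0x01))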
    cipher_inverse()
    component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher's operations.

    INPUT:

    • None

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: len(result)
    9
    component_from(round_number, index)
    compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    ALGORITHM:

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    Note

    d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    INPUT:

    • all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()
    • avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

    See also

    avalanche_probability_vectors() for the returned vectors.

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
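    The four criteria above can be illustrated on a single avalanche probability vector; the following sketch (illustrative only, not the library's implementation) derives them from a vector p of per-bit flip probabilities, following the definitions in the ALGORITHM section:

        import numpy as np

        p = np.array([0.5, 0.48, 0.9, 0.1])       # example per-bit flip probabilities
        bias = 0.05

        dependence = np.count_nonzero(p > 0)       # output bits that flip at all
        dependence_uniform = np.count_nonzero((p >= 0.5 - bias) & (p <= 0.5 + bias))
        weight = p.sum()                           # expected Hamming weight of the output difference
        # Binary entropy of each flip probability, summed over the output bits.
        h = -(p * np.log2(p) + (1 - p) * np.log2(1 - p))
        entropy = np.nan_to_num(h).sum()
        print(dependence, dependence_uniform, weight, entropy)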
    continuous_avalanche_factor(lambda_value, number_of_samples)

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    INPUT:

    • lambda_value – float; threshold value used to express the input difference
    • number_of_samples – integer; number of samples used to compute the continuous avalanche factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2)
    sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    0.0
    continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    INPUT:

    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric
    • gf_number_samples – integer; number of vectors used to approximate gf_2

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=2) # long time
    sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    True
    continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    INPUT:

    • continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor
    • threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor
    • continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric
    • continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2
    • continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric
    • continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2
    • is_continuous_avalanche_factor – boolean (default: True); whether to compute the continuous_avalanche_factor metric
    • is_continuous_neutrality_measure – boolean (default: True); whether to compute the continuous_neutrality_measure metric
    • is_diffusion_factor – boolean (default: True); whether to compute the diffusion_factor metric

    OUTPUT:

    • A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck_cipher = speck(number_of_rounds=1) # long time
    sage: output = speck_cipher.continuous_diffusion_tests() # long time
    sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    True

    continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    INPUT:

    • beta_number_of_samples – integer; number of samples used to compute the continuous measure metric
    • gf_number_samples – integer; number of vectors used to approximate gf_2
    • input_bit – integer (default: None); input bit position to be analyzed
    • output_bits – list (default: None); output bit positions to be analyzed

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    True
    diff --git a/docs/build/html/ciphers/permutations/tinyjambu_fsr_32bits_word_permutation.html b/docs/build/html/ciphers/permutations/tinyjambu_fsr_32bits_word_permutation.html
    index f9f1da15..7ffbf0c5 100644
    --- a/docs/build/html/ciphers/permutations/tinyjambu_fsr_32bits_word_permutation.html
    +++ b/docs/build/html/ciphers/permutations/tinyjambu_fsr_32bits_word_permutation.html
    -Tinyjambu fsr 32bits word permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    +Tinyjambu fsr 32bits word permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Tinyjambu fsr 32bits word permutation
    class TinyJambuFSRWordBasedPermutation(key_bit_size=128, number_of_rounds=640)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Construct an instance of the TinyJambuFSRWordBasedPermutation class with fsr component.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
-compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

Return a python dictionary that contains the dictionaries corresponding to each criterion.

ALGORITHM:

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

Note

d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size with input diff injected in key

INPUT:

• all_apvs – dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

• avalanche_dependence_uniform_bias – float; defines the range where the probability of flipping should be

See also

avalanche_probability_vectors() for the returning vectors.

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
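To make the ALGORITHM paragraph concrete, the sketch below checks the avalanche dependence criterion for one round against a chosen threshold; the dictionary indexing follows the doctest above, while the assumption that the entry is a flat list of per-bit values is not taken from the documentation.

# Hedged sketch: testing the avalanche dependence criterion for one round.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
apvs = speck.avalanche_probability_vectors(100)
d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)

output_bit_size = 16  # block size of this Speck instance
threshold = 0         # bias tolerated around the output bit size
values = d["key"]["round_output"][0][0]["avalanche_dependence_vectors"]  # per the doctest
worst = min(values)   # assumes a flat list of per-bit dependence values
print(output_bit_size - threshold <= worst <= output_bit_size + threshold)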
-continuous_avalanche_factor(lambda_value, number_of_samples)

Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

INPUT:

• lambda_value – float; threshold value used to express the input difference

• number_of_samples – integer; number of samples used to compute the continuous avalanche factor

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2)
sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
0.0
-continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

• gf_number_samples – integer; number of vectors used to approximate gf_2

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=2) # long time
sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
True
-continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

INPUT:

• continuous_avalanche_factor_number_of_samples – integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor

• threshold_for_avalanche_factor – float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor

• continuous_neutral_measure_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

• continuous_neutral_measure_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

• continuous_diffusion_factor_beta_number_of_samples – integer (default: 10); number of samples used to compute the continuous measure metric

• continuous_diffusion_factor_gf_number_samples – integer (default: 10); number of vectors used to approximate gf_2

• is_continuous_avalanche_factor – boolean (default: True); flag indicating whether the continuous_avalanche_factor is computed

• is_continuous_neutrality_measure – boolean (default: True); flag indicating whether the continuous_neutrality_measure is computed

• is_diffusion_factor – boolean (default: True); flag indicating whether the diffusion_factor is computed

OUTPUT:

• A python dictionary that contains the test results for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=1) # long time
sage: output = speck_cipher.continuous_diffusion_tests() # long time
sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
True
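The is_* flags documented above can be used to run a single metric; a hedged sketch follows, with sample counts reduced purely for illustration.

# Hedged sketch: computing only the continuous avalanche factor.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher(number_of_rounds=1)
output = cipher.continuous_diffusion_tests(
    continuous_avalanche_factor_number_of_samples=50,
    is_continuous_avalanche_factor=True,
    is_continuous_neutrality_measure=False,
    is_diffusion_factor=False,
)
# Per the doctest above, results are grouped by cipher input and output tag.
print(list(output['plaintext'].keys()))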
-continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

• gf_number_samples – integer; number of vectors used to approximate gf_2

• input_bit – integer (default: None); input bit position to be analyzed

• output_bits – list (default: None); output bit positions to be analyzed

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
True
+convert_to_compound_xor_cipher()

-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)

+create_networx_graph_from_input_ids()

-convert_to_compound_xor_cipher()

+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

-diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

INPUT:

• number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

• avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

• avalanche_dependence_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_dependence_uniform_criterion_threshold – float (default: 0); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_weight_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

• avalanche_entropy_criterion_threshold – float (default: 0.01); it is a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

• run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

• run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

• run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

Note

diff inserted in: d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][“output_vectors”][round]

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100)
sage: d["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
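For a concrete reading of the thresholds listed above, the following worked check shows when a measured avalanche weight d satisfies the criterion for a 16-bit block; the numbers are illustrative only.

# Hedged sketch: interpreting avalanche_weight_criterion_threshold as a bias
# around block_bit_size / 2, as stated in the parameter description above.
block_bit_size = 16
bias = 0.01
d = 8.004  # hypothetical measured avalanche weight for one input difference
lower, upper = block_bit_size / 2 - bias, block_bit_size / 2 + bias
print(lower <= d <= upper)  # True here, since 8.004 lies inside [7.99, 8.01]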
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.
@@ -667,11 +377,14 @@

• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-• intermediate_outputs – boolean (default: False)

+• intermediate_output – boolean (default: False)

• verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

EXAMPLES:
    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
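The equality noted above for evaluate_api can be checked directly; a hedged sketch with arbitrary integer inputs (the values below are illustrative, not taken from the documentation).

# Hedged sketch: evaluate_api=True should reproduce the integer-based evaluate().
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
inputs = [0x12345678, 0x1918111009080100]  # [plaintext, key] as integers (illustrative)
print(speck.evaluate(inputs) == speck.evaluate_vectorized(inputs, evaluate_api=True))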
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
-find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

Return good neural distinguisher input differences for a cipher.

INPUT:

• difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

• initial_population – integer (default: 32); parameter of the evolutionary algorithm

• number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

• nb_samples – integer (default: 10); number of samples for testing each input difference

• previous_generation – (default: None); optional: initial table of differences to try

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

-generate_csv_report(nb_samples, output_absolute_path)

Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

INPUT:

• nb_samples – integer; number of samples

• output_absolute_path – string; absolute path of the output file

EXAMPLES:

sage: import inspect
sage: import claasp
sage: import os.path
sage: tii_path = inspect.getfile(claasp)
sage: tii_dir_path = os.path.dirname(tii_path)
sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
sage: identity = IdentityBlockCipher()
sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
True
sage: import os
sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

-generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

INPUT:

• avalanche_results – dictionary; results of the avalanche tests

• difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

• criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all of the 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: h[:20]
'\documentclass[12pt]'

sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
sage: ascon = AsconPermutation(number_of_rounds=4)
sage: d = ascon.diffusion_tests(number_of_samples=100) # long
sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
sage: cipher = XoodooPermutation(number_of_rounds=4)
sage: d = cipher.diffusion_tests(number_of_samples=100) # long
sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
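Since the returned string is plain LaTeX, it can be written to disk and compiled; a minimal sketch follows (the output file name is hypothetical).

# Hedged sketch: saving the heatmap LaTeX source to a file.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
d = sp.diffusion_tests(number_of_samples=100)
latex_source = sp.generate_heatmap_graphs_for_avalanche_tests(d)
with open("speck_avalanche_heatmaps.tex", "w") as handle:  # hypothetical file name
    handle.write(latex_source)
# The resulting .tex file can then be compiled with any LaTeX distribution.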
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1145,43 +763,6 @@

    Navigation

    make_file_name()
-neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
-neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); for how many epochs the neural network is trained

• diff – list (default: [0x01]); list of input differences

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
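The doctests above are commented out because training is expensive; a hedged sketch of a deliberately tiny run is shown below (argument values are illustrative only and would give a weak distinguisher).

# Hedged sketch: a very small black-box distinguisher run.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(number_of_rounds=3)
accuracies = speck.neural_network_blackbox_distinguisher_tests(
    nb_samples=10, hidden_layers=[32, 32, 32], number_of_epochs=1
)
# Per the description above, the result is a python dictionary of per-round accuracies.
print(type(accuracies))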
    property number_of_rounds
    @@ -1338,24 +919,6 @@

    Navigation

-print_component_analysis_as_radar_charts(component_analysis_results)

Return a matplotlib object containing the radar charts of the component analysis tests.

INPUT:

• component_analysis_results – list; results of the component analysis method

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1495,38 +1058,6 @@

    Navigation

    property rounds_as_list
-run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

Runs the AutoND pipeline ([BGHR2023]):

• Find an input difference for the inputs set to True in difference_positions using an optimizer

• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

INPUT:

• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions). If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
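A hedged sketch restricting the optimizer to plaintext differences only (the single-key setting described above); the reduced sample counts are for illustration and would give a weak distinguisher in practice.

# Hedged sketch: AutoND with differences allowed in the plaintext only.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

cipher = SpeckBlockCipher()
results = cipher.run_autond_pipeline(
    difference_positions=[True, False],  # one boolean per input: [plaintext, key]
    optimizer_samples=1000,
    optimizer_generations=5,
    training_samples=10**5,
    testing_samples=10**4,
    number_of_epochs=1,
)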
    set_file_name(file_name)
    @@ -1603,70 +1134,16 @@

    Navigation

-train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr’s paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766
-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type
    @@ -1691,13 +1168,13 @@

Previous topic

-Sparkle permutation
+Util


diff --git a/docs/build/html/ciphers/permutations/tinyjambu_permutation.html b/docs/build/html/ciphers/permutations/tinyjambu_permutation.html
index 23e44e53..6929fed2 100644
--- a/docs/build/html/ciphers/permutations/tinyjambu_permutation.html
+++ b/docs/build/html/ciphers/permutations/tinyjambu_permutation.html
@@ -1,23 +1,24 @@
-Tinyjambu permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Tinyjambu permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@


    Navigation

    -

    Tinyjambu permutation

    +

    Tinyjambu permutation

    class TinyJambuPermutation(key_bit_size=128, number_of_rounds=640)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the TinyJambuPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@
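A minimal instantiation sketch for the constructor shown above; the keyword values simply repeat the documented defaults, and the import path mirrors this documentation page's location, so treat it as an assumption.

# Hedged sketch: building a TinyJambu permutation instance with the documented defaults.
from claasp.ciphers.permutations.tinyjambu_permutation import TinyJambuPermutation

tinyjambu = TinyJambuPermutation(key_bit_size=128, number_of_rounds=640)
print(tinyjambu.number_of_rounds)  # property documented further down this page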

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
-algebraic_tests(timeout)

Return a dictionary explaining the result of the algebraic test.

INPUT:

• timeout – integer; the timeout for the Grobner basis computation in seconds

OUTPUT: a dictionary with the following keys:

• npolynomials – number of polynomials

• nvariables – number of variables

• timeout – timeout in seconds

• pass – whether the algebraic test passes with respect to the given timeout

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
sage: d = speck.algebraic_tests(5)  # long time
sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
....: {'number_of_variables': [304, 800],
....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
True
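A hedged sketch of consuming the result dictionary, using only the key names visible in the doctest above.

# Hedged sketch: reading the algebraic test summary.
from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
d = speck.algebraic_tests(5)  # timeout of 5 seconds, as in the doctest above
# Key names taken from the doctest; one entry per analysed round.
print(d['input_parameters']['timeout'])
print(d['test_results']['test_passed'])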
-analyze_cipher(tests_configuration)

Generate a dictionary with the analysis of the cipher.

The analysis is related to the following tests:

• Diffusion Tests

INPUT:

• tests_configuration – python dictionary

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
....: "avalanche_dependence_uniform_criterion_threshold": 0, "avalanche_weight_criterion_threshold": 0.1,
....: "avalanche_entropy_criterion_threshold": 0.1}, "component_analysis_tests": {"run_tests": True}}
sage: analysis = sp.analyze_cipher(tests_configuration)
sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -667,11 +377,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1145,43 +763,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1338,24 +919,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1495,38 +1058,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1603,70 +1134,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly
    modified (AMSGrad instead of a cyclic learning rate schedule) depth-`depth` version of Gohr's ResNet ([Go2019]).

    INPUT:

    • input_difference -- list of integers; the input difference, expressed as a list with one value per input to the cipher
    • number_of_rounds -- integer; number of rounds to analyze
    • depth -- integer (default: 1); the depth of the neural network, as defined in Gohr's paper
    • word_size -- integer; the word size of the cipher, which determines the shape of the neural network; defaults to output_bit_size when unspecified (this may reduce the accuracy of the obtained distinguisher)
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • number_of_epochs -- integer (default: 40); number of training epochs

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: number_of_rounds = 5
        sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
        2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
        Validation accuracy at 5 rounds :0.9101160168647766
        0.9101160168647766

-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator -- function; a dataset generation function taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector; to reproduce classical neural distinguisher results, one would use the example below
    • starting_round -- integer; number of rounds to analyze
    • neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • pipeline -- boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant
    • verbose -- boolean (default: False); verbosity

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
        sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
        sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)
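    A minimal sketch of a custom data generator honouring this contract (one numpy row per sample, a binary label vector) is given below; the helper name make_random_dataset and its internals are illustrative assumptions, not part of the library.

        import numpy as np

        def make_random_dataset(cipher, number_of_rounds, samples):
            # Illustrative stand-in for a real dataset builder: X has one row per
            # sample (here 64 random bits), Y holds binary labels. A genuine
            # generator would derive X from cipher evaluations at number_of_rounds.
            X = np.random.randint(0, 2, size=(samples, 64), dtype=np.uint8)
            Y = np.random.randint(0, 2, size=samples, dtype=np.uint8)
            return X, Y

        # train_neural_distinguisher expects a callable of (number_of_rounds, samples),
        # so the cipher is usually bound with a lambda, as in the example above:
        # data_generator = lambda nr, samples: make_random_dataset(cipher, nr, samples)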
property type
diff --git a/docs/build/html/ciphers/permutations/util.html b/docs/build/html/ciphers/permutations/util.html
index cf10ad45..793b2691 100644
--- a/docs/build/html/ciphers/permutations/util.html
+++ b/docs/build/html/ciphers/permutations/util.html
-Util — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Util — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Util

    add_intermediate_output_component_latin_dances_permutations(permutation, round_i, number_of_rounds)
init_state_latin_dances(permutation, input_plaintext)
+print_state_ids(state)
    sub_quarter_round_latin_dances(permutation, state, p1_index, p2_index, p3_index, rot_amount, cipher_name)
diff --git a/docs/build/html/ciphers/permutations/xoodoo_invertible_permutation.html b/docs/build/html/ciphers/permutations/xoodoo_invertible_permutation.html
index 6fb7032d..4ef769df 100644
--- a/docs/build/html/ciphers/permutations/xoodoo_invertible_permutation.html
+++ b/docs/build/html/ciphers/permutations/xoodoo_invertible_permutation.html
-Xoodoo invertible permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Xoodoo invertible permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Xoodoo invertible permutation

class XoodooInvertiblePermutation(number_of_rounds=12)

-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the XoodooInvertiblePermutation class.

    This class is used to store compact representations of a cipher.

    INPUT:

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
-algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

    INPUT:

    • timeout -- integer; the timeout for the Grobner basis computation in seconds

    OUTPUT: a dictionary with the following keys:

    • npolynomials -- number of polynomials
    • nvariables -- number of variables
    • timeout -- timeout in seconds
    • pass -- whether the algebraic test passes w.r.t. the given timeout

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
        sage: d = speck.algebraic_tests(5)  # long time
        sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
        ....: {'number_of_variables': [304, 800],
        ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
        ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
        True

-analyze_cipher(tests_configuration)

    Generate a dictionary with the analysis of the cipher.

    The analysis is related to the following tests:

    • Diffusion Tests

    INPUT:

    • tests_configuration -- python dictionary

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
        sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
        ....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
        ....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
        ....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
        ....: "avalanche_dependence_uniform_criterion_threshold": 0, "avalanche_weight_criterion_threshold": 0.1,
        ....: "avalanche_entropy_criterion_threshold": 0.1}, "component_analysis_tests": {"run_tests": True}}
        sage: analysis = sp.analyze_cipher(tests_configuration)
        sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
        ....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    apply_sbox_to_each_3bit_column(planes, planes_new)
    as_python_dictionary()
-avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

    The inputs considered are plaintext, key, etc.

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note: apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples -- integer; used to compute the estimated probability of flipping

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
        sage: apvs = speck.avalanche_probability_vectors(100)
        sage: apvs["key"]["round_output"][31][0] # random
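    A short sketch of how the returned structure can be inspected, following the indexing convention stated in the note above; the variable names are assumptions and the call mirrors the example.

        # Assuming apvs was produced as in the example above:
        # apvs["key"]["round_output"][position][index_occurrence] is a list of
        # estimated flip probabilities, one per output bit of that round.
        vector = apvs["key"]["round_output"][31][0]
        worst_bit = max(range(len(vector)), key=lambda i: abs(vector[i] - 0.5))
        print(f"bit {worst_bit} deviates most from 1/2: {vector[worst_bit]:.3f}")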
    chi_definition(planes)
-component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher's operations.

    INPUT:

    • None

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
        sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
        sage: result = aes.component_analysis_tests()
        sage: len(result)
        9

component_from(round_number, index)

-compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    ALGORITHM:

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in $\left[\frac{1}{2} - \text{bias}, \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    Note: d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    INPUT:

    • all_apvs -- dictionary; all avalanche probability vectors returned by avalanche_probability_vectors()
    • avalanche_dependence_uniform_bias -- float; defines the range where the probability of flipping should be

    See also: avalanche_probability_vectors() for the returned vectors.

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
        sage: apvs = speck.avalanche_probability_vectors(100)
        sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
        sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
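    The four criteria above can be illustrated on a single avalanche probability vector; the sketch below is a simplified stand-alone computation under the stated definitions, not the library's implementation.

        import numpy as np

        def criteria_from_probability_vector(p, bias=0.05):
            # p[i] = estimated probability that output bit i flips for a fixed
            # input bit difference (one entry of an avalanche probability vector).
            p = np.asarray(p, dtype=float)
            dependence = int(np.count_nonzero(p > 0))                      # bits that ever flip
            dependence_uniform = int(np.count_nonzero(np.abs(p - 0.5) <= bias))
            weight = float(p.sum())                                        # expected Hamming weight
            with np.errstate(divide="ignore", invalid="ignore"):
                h = -(p * np.log2(p) + (1 - p) * np.log2(1 - p))
            entropy = float(np.nan_to_num(h).sum())
            return dependence, dependence_uniform, weight, entropy

        print(criteria_from_probability_vector([0.0, 0.5, 0.48, 1.0]))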
-continuous_avalanche_factor(lambda_value, number_of_samples)

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    INPUT:

    • lambda_value -- float; threshold value used to express the input difference
    • number_of_samples -- integer; number of samples used to compute the continuous avalanche factor

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck_cipher = speck(number_of_rounds=2)
        sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
        sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
        0.0

-continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    INPUT:

    • beta_number_of_samples -- integer; number of samples used to compute the continuous measure metric
    • gf_number_samples -- integer; number of vectors used to approximate gf_2

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck_cipher = speck(number_of_rounds=2) # long time
        sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
        sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
        True

-continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    INPUT:

    • continuous_avalanche_factor_number_of_samples -- integer (default: 100); number of samples used to obtain the metric continuous_avalanche_factor
    • threshold_for_avalanche_factor -- float (default: 0.001); threshold value used to compute the input difference for the metric continuous_avalanche_factor
    • continuous_neutral_measure_beta_number_of_samples -- integer (default: 10); number of samples used to compute the continuous measure metric
    • continuous_neutral_measure_gf_number_samples -- integer (default: 10); number of vectors used to approximate gf_2
    • continuous_diffusion_factor_beta_number_of_samples -- integer (default: 10); number of samples used to compute the continuous measure metric
    • continuous_diffusion_factor_gf_number_samples -- integer (default: 10); number of vectors used to approximate gf_2
    • is_continuous_avalanche_factor -- boolean (default: True); flag indicating whether the continuous_avalanche_factor is wanted
    • is_continuous_neutrality_measure -- boolean (default: True); flag indicating whether the continuous_neutrality_measure is wanted
    • is_diffusion_factor -- boolean (default: True); flag indicating whether the diffusion_factor is wanted

    OUTPUT:

    • A python dictionary that contains the test result for each metric, e.g. continuous_neutrality_measure, continuous_avalanche_factor, diffusion_factor

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck_cipher = speck(number_of_rounds=1) # long time
        sage: output = speck_cipher.continuous_diffusion_tests() # long time
        sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
        True

-continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    INPUT:

    • beta_number_of_samples -- integer; number of samples used to compute the continuous measure metric
    • gf_number_samples -- integer; number of vectors used to approximate gf_2
    • input_bit -- integer (default: None); input bit position to be analyzed
    • output_bits -- list (default: None); output bit positions to be analyzed

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
        sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
        True
+convert_to_compound_xor_cipher()
-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
+create_networx_graph_from_input_ids()
-convert_to_compound_xor_cipher()
+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    -
-diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    INPUT:

    • number_of_samples -- integer (default: 5); used to compute the estimated probability of flipping
    • avalanche_dependence_uniform_bias -- float (default: 0.05); defines the range where the probability of flipping should be
    • avalanche_dependence_criterion_threshold -- float (default: 0); a bias; the criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_dependence_uniform_criterion_threshold -- float (default: 0); a bias; the criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_weight_criterion_threshold -- float (default: 0.01); a bias; the criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
    • avalanche_entropy_criterion_threshold -- float (default: 0.01); a bias; the criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • run_avalanche_dependence -- boolean (default: True); if True, add the avalanche dependence results to the output dictionary
    • run_avalanche_dependence_uniform -- boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
    • run_avalanche_weight -- boolean (default: True); if True, add the avalanche weight results to the output dictionary
    • run_avalanche_entropy -- boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note: diff inserted in d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
        sage: d = speck.diffusion_tests(number_of_samples=100)
        sage: d["test_results"]["key"]["round_output"][ # random
        ....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
evaluate(cipher_input, intermediate_output=False, verbosity=False)

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input and cipher_inputs[1] the second.

    INPUT:

    • cipher_input -- list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
    -• intermediate_outputs -- boolean (default: False)
    +• intermediate_output -- boolean (default: False)
    • verbosity -- boolean (default: False); set this flag to True in order to print the input/output of each component
    +• evaluate_api -- boolean (default: False); if set to True, takes integer inputs and returns integer outputs (as the evaluate function); it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    EXAMPLES:

        sage: import numpy as np
        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
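    A rough sketch of preparing the byte matrices this parameter description calls for is given below; the one-row-per-byte, one-column-per-sample orientation is read off the cipher_input description and should be treated as an assumption.

        import numpy as np

        # Two plaintext/key pairs for Speck 32/64, packed as byte matrices:
        # one row per byte of the input, one column per sample.
        num_samples = 2
        plaintexts = np.random.randint(0, 256, size=(32 // 8, num_samples), dtype=np.uint8)
        keys = np.random.randint(0, 256, size=(64 // 8, num_samples), dtype=np.uint8)

        # With `speck` built as in the example, the vectorized evaluation would be:
        # ciphertexts = speck.evaluate_vectorized([plaintexts, keys])
        # With evaluate_api=True, integer inputs are expected instead, matching evaluate:
        # speck.evaluate_vectorized([0x12345678, 0x1122334455667788], evaluate_api=True)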


    property file_name
-find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions -- table of booleans, one for each input to the cipher; True in positions where differences are allowed
    • initial_population -- integer (default: 32); parameter of the evolutionary algorithm
    • number_of_generations -- integer (default: 50); number of iterations of the evolutionary algorithm
    • nb_samples -- integer (default: 10); number of samples for testing each input difference
    • previous_generation -- (default: None); optional initial table of differences to try
    • verbose -- boolean (default: False); verbosity

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
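    A hedged sketch of how the search result could feed the training method documented later in this section; the way the evolutionary scores are reduced to a single candidate here is an assumption about their layout, not library behaviour.

        # Assuming cipher, diff, scores and highest_round were obtained as in the
        # example above, one plausible follow-up is to train a distinguisher on
        # the best-scoring difference (the index choice is illustrative):
        best_index = max(range(len(scores)), key=lambda i: scores[i])
        best_difference = diff[best_index]
        # cipher.train_gohr_neural_distinguisher([best_difference, 0], highest_round,
        #                                        word_size=16, number_of_epochs=1)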
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
-generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples -- integer; number of samples
    • output_absolute_path -- string; absolute path of the output file

    EXAMPLES:

        sage: import inspect
        sage: import claasp
        sage: import os.path
        sage: tii_path = inspect.getfile(claasp)
        sage: tii_dir_path = os.path.dirname(tii_path)
        sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
        sage: identity = IdentityBlockCipher()
        sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
        sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
        True
        sage: import os
        sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    @@ -848,50 +510,6 @@

    Navigation

    -
-generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or written to a file.

    INPUT:

    • avalanche_results -- dictionary; results of the avalanche tests
    • difference_positions -- list (default: None); positions of the differences to inject; the default value is equivalent to picking one of the worst positions for a difference and the average value
    • criterion_names -- list (default: None); names of the criteria to observe; the default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
        sage: d = sp.diffusion_tests(number_of_samples=100)
        sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
        sage: h[:20]
        '\documentclass[12pt]'

        sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
        sage: ascon = AsconPermutation(number_of_rounds=4)
        sage: d = ascon.diffusion_tests(number_of_samples=100) # long
        sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

        sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
        sage: cipher = XoodooPermutation(number_of_rounds=4)
        sage: d = cipher.diffusion_tests(number_of_samples=100) # long
        sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
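    Since the method returns a LaTeX string, a small sketch of saving and compiling it is given below; the output file name and the use of pdflatex are assumptions, not part of the library.

        import subprocess
        from pathlib import Path

        # h is the LaTeX string returned by generate_heatmap_graphs_for_avalanche_tests.
        tex_path = Path("avalanche_heatmaps.tex")
        tex_path.write_text(h)

        # Compiling is optional and requires a LaTeX toolchain on the machine:
        subprocess.run(["pdflatex", "-interaction=nonstopmode", tex_path.name], check=False)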
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    make_file_name()
-neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples -- integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers -- list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs -- integer (default: 10); how long the neural network is trained

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random

-neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples -- integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers -- list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs -- integer (default: 10); how long the neural network is trained
    • diff -- list (default: [0x01]); list of input differences

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
        sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
-print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the component analysis test.

    INPUT:

    • component_analysis_results -- list; results of the component analysis method

    EXAMPLES:

        sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
        sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
        sage: result = aes.component_analysis_tests()
        sage: fig = aes.print_component_analysis_as_radar_charts(result)
        sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    property rounds_as_list
-run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

    • difference_positions -- list of booleans; default: `True in the plaintext position, False in the other positions`. If specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
    • optimizer_samples -- integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
    • optimizer_generations -- integer (default: 50); number of generations used by the optimizer; higher values increase the runtime
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • number_of_epochs -- integer (default: 40); number of training epochs
    • verbose -- boolean (default: False); verbosity of the optimizer

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: cipher.run_autond_pipeline()
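    A hedged usage sketch restricting the search to plaintext differences (the related-key variant would set the key position to True as well); the parameter values are illustrative, chosen small only to keep the run short.

        from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

        cipher = SpeckBlockCipher()
        # One boolean per cipher input (here: plaintext, key); True marks positions
        # where the optimizer may place a difference, so this is the single-key case.
        results = cipher.run_autond_pipeline(difference_positions=[True, False],
                                             optimizer_generations=5,
                                             number_of_epochs=1,
                                             verbose=False)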
    set_file_name(file_name)
    theta_definition(planes)
-train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly
    modified (AMSGrad instead of a cyclic learning rate schedule) depth-`depth` version of Gohr's ResNet ([Go2019]).

    INPUT:

    • input_difference -- list of integers; the input difference, expressed as a list with one value per input to the cipher
    • number_of_rounds -- integer; number of rounds to analyze
    • depth -- integer (default: 1); the depth of the neural network, as defined in Gohr's paper
    • word_size -- integer; the word size of the cipher, which determines the shape of the neural network; defaults to output_bit_size when unspecified (this may reduce the accuracy of the obtained distinguisher)
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • number_of_epochs -- integer (default: 40); number of training epochs

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: number_of_rounds = 5
        sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size=16, number_of_epochs=1)
        2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
        Validation accuracy at 5 rounds :0.9101160168647766
        0.9101160168647766

-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator -- function; a dataset generation function taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector; to reproduce classical neural distinguisher results, one would use the example below
    • starting_round -- integer; number of rounds to analyze
    • neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • pipeline -- boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant
    • verbose -- boolean (default: False); verbosity

    EXAMPLES::

        sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
        sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
        sage: cipher = SpeckBlockCipher()
        sage: input_differences = [0x400000, 0]
        sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds=nr, samples=samples)
        sage: neural_network = get_neural_network('gohr_resnet', input_size=64)
        sage: cipher.train_neural_distinguisher(data_generator, starting_round=5, neural_network=neural_network)
property type
diff --git a/docs/build/html/ciphers/permutations/xoodoo_permutation.html b/docs/build/html/ciphers/permutations/xoodoo_permutation.html
index e6428f5a..40d40a24 100644
--- a/docs/build/html/ciphers/permutations/xoodoo_permutation.html
+++ b/docs/build/html/ciphers/permutations/xoodoo_permutation.html
-Xoodoo permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Xoodoo permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

Xoodoo permutation

class XoodooPermutation(number_of_rounds=3)

-Bases: claasp.cipher.Cipher
+Bases: Cipher

    Construct an instance of the XoodooPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    chi_definition(planes)
    @@ -368,185 +286,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -579,53 +336,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -670,7 +380,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -684,11 +394,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer inputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) +is True. +EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -749,28 +462,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -806,35 +497,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -853,50 +515,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1167,43 +785,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples -- integer (default: 10000); how many samples the neural network is trained with

• hidden_layers -- list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs -- integer (default: 10); the number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples -- integer (default: 10000); how many samples the neural network is trained with

• hidden_layers -- list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs -- integer (default: 10); the number of epochs used to train the neural network

• diff -- list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1360,24 +941,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1532,38 +1095,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions -- list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples -- integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations -- integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• number_of_epochs -- integer (default: 40); number of training epochs

• verbose -- boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
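A minimal sketch of the same pipeline with explicit parameter choices, assuming a two-input Speck instance whose first input is the plaintext; the reduced sample counts are hypothetical and only for illustration, so the resulting distinguisher would be weak in practice:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=5)
sage: cipher.run_autond_pipeline(difference_positions=[True, False],   # search plaintext differences only
....:     optimizer_samples=10**3, optimizer_generations=5,
....:     training_samples=10**5, testing_samples=10**4,
....:     number_of_epochs=1, verbose=False)  # doctest: +SKIP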

    -
    -
    set_file_name(file_name)
    @@ -1645,70 +1176,16 @@

    Navigation

    theta_definition(planes)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• number_of_epochs -- integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round -- integer; number of rounds to analyze

• neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• pipeline -- boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose -- boolean (default: False); verbosity

    -

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
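For a custom dataset, the data_generator only needs to respect the documented contract (X: one row per sample, Y: label vector). A minimal hypothetical sketch, not part of CLAASP, that returns random data of the right shape:

sage: import numpy as np
sage: def random_data_generator(nr, samples):
....:     # hypothetical stand-in: 64 features per sample, binary labels
....:     X = np.random.randint(0, 2, size=(samples, 64), dtype=np.uint8)
....:     Y = np.random.randint(0, 2, size=samples, dtype=np.uint8)
....:     return X, Y
sage: X, Y = random_data_generator(5, 100)
sage: X.shape, Y.shape
((100, 64), (100,))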

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1733,13 +1210,13 @@

    Navigation

    This Page

    @@ -1757,7 +1234,7 @@

    Quick search

    - +
    @@ -1772,10 +1249,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1783,7 +1260,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/permutations/xoodoo_sbox_permutation.html b/docs/build/html/ciphers/permutations/xoodoo_sbox_permutation.html index 065c402b..f580e289 100644 --- a/docs/build/html/ciphers/permutations/xoodoo_sbox_permutation.html +++ b/docs/build/html/ciphers/permutations/xoodoo_sbox_permutation.html @@ -1,23 +1,24 @@ - + - Xoodoo sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Xoodoo sbox permutation — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Xoodoo sbox permutation

    +

    Xoodoo sbox permutation

    class XoodooSboxPermutation(number_of_rounds=12)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the XoodooSboxPermutation class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -227,64 +228,6 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    apply_sbox_to_each_3bit_column(planes, planes_new)
    @@ -295,31 +238,6 @@

    Navigation

    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    chi_definition(planes)
    @@ -363,185 +281,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
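The returned list can be passed directly to the radar-chart printer documented further down on this page, e.g.:

sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP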
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in \(\left[\frac{1}{2} - \text{bias};\ \frac{1}{2} + \text{bias}\right]\), with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
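The boolean flags make it possible to compute only a subset of the metrics; a sketch using only the documented parameters, with reduced sample counts for speed:

sage: output = speck_cipher.continuous_diffusion_tests(
....:     continuous_avalanche_factor_number_of_samples=10,
....:     is_continuous_avalanche_factor=True,
....:     is_continuous_neutrality_measure=False,
....:     is_diffusion_factor=False)  # doctest: +SKIP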
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -574,53 +331,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -665,7 +375,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -679,11 +389,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True. EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
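A sketch of how the byte-oriented input arrays could be built for this Speck-32/64 instance; the byte-per-row, sample-per-column layout is an assumption inferred from the parameter description above:

sage: number_of_samples = 10
sage: plaintexts = np.random.randint(0, 256, (4, number_of_samples), dtype=np.uint8)  # 32-bit plaintext = 4 bytes per column
sage: keys = np.random.randint(0, 256, (8, number_of_samples), dtype=np.uint8)        # 64-bit key = 8 bytes per column
sage: out = speck.evaluate_vectorized([plaintexts, keys])  # doctest: +SKIP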
    @@ -744,28 +457,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -801,35 +492,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -848,50 +510,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors" and "avalanche_weight_vectors".

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
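Since the returned value is a plain LaTeX string, it can be written to a .tex file for compilation; a small sketch, with an arbitrary file name:

sage: with open('avalanche_heatmaps.tex', 'w') as latex_file:  # doctest: +SKIP
....:     _ = latex_file.write(h)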
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1162,43 +780,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1355,24 +936,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
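Because the returned object is a matplotlib figure, it can also be saved to disk instead of shown interactively; the file name here is arbitrary:

sage: fig.savefig("aes_component_analysis_radar.png", dpi=150) # doctest: +SKIP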
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1527,38 +1090,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
• difference_positions -- list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples -- integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations -- integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• number_of_epochs -- integer (default: 40); number of training epochs

• verbose -- boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1640,70 +1171,16 @@

    Navigation

    theta_definition(planes)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• number_of_epochs -- integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round -- integer; number of rounds to analyze

• neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples -- integer (default: 10**7); number of samples used for training

• testing_samples -- integer (default: 10**6); number of samples used for testing

• pipeline -- boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose -- boolean (default: False); verbosity

    -

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1728,8 +1205,8 @@

    Navigation

    Next topic

    @@ -1752,7 +1229,7 @@

    Quick search

    - +
    @@ -1770,7 +1247,7 @@

    Navigation

    next |
  • - previous |
  • @@ -1778,7 +1255,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/stream_ciphers/a5_1_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/a5_1_stream_cipher.html index b9f420d0..6c7251b9 100644 --- a/docs/build/html/ciphers/stream_ciphers/a5_1_stream_cipher.html +++ b/docs/build/html/ciphers/stream_ciphers/a5_1_stream_cipher.html @@ -1,23 +1,24 @@ - + - A5 1 stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + A5 1 stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    A5 1 stream cipher

    +

    A5 1 stream cipher

    class A51StreamCipher(key_bit_size=64, frame_bit_size=22, number_of_normal_clocks_at_initialization=100, number_of_rounds=228)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the A51StreamCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -234,94 +235,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
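The per-round pass/fail flags can be read back from the returned dictionary, following the structure shown in the comparison above:

sage: d['test_results']['test_passed']  # doctest: +SKIP
[False, False]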
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -360,185 +278,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

The avalanche dependence uniform is the number of output bits that flip with a probability in \(\left[\frac{1}{2} - \text{bias};\ \frac{1}{2} + \text{bias}\right]\), with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
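The analysis can also be restricted to particular bit positions via the optional arguments; a sketch with arbitrary example positions:

sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(
....:     10, 10, input_bit=0, output_bits=[0, 1, 2, 3])  # doctest: +SKIP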
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -571,53 +328,6 @@

-diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    INPUT:

    • number_of_samples – integer (default: 5); used to compute the estimated probability of flipping
    • avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be
    • avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias
    • avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias
    • run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary
    • run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary
    • run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary
    • run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

    diff inserted in:
    d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: d = speck.diffusion_tests(number_of_samples=100)
    sage: d["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
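As a complement to the example above, a minimal sketch of how the returned dictionary can be inspected. The key path follows the Note above; the calls are skipped here because the results are estimated from random samples:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: d = speck.diffusion_tests(number_of_samples=100) # doctest: +SKIP
    sage: sorted(d["test_results"]["plaintext"]["round_output"].keys()) # doctest: +SKIP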
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -662,7 +372,7 @@

-evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
+evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second.

    @@ -676,11 +386,14 @@

    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
-   • intermediate_outputs – boolean (default: False)
+   • intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component
+   • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -741,28 +454,6 @@ 


    property file_name
    -
    -
-find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

    INPUT:

    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed
    • initial_population – integer (default: 32); parameter of the evolutionary algorithm
    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm
    • nb_samples – integer (default: 10000); number of samples for testing each input difference
    • previous_generation – (default: None); optional: initial table of differences to try
    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose=False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -798,35 +489,6 @@

-generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    This method generates a CSV report containing the criteria presented in the paper
    "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

    • nb_samples – integer; number of samples
    • output_absolute_path – string; absolute path of the output file

    EXAMPLES:

    sage: import inspect
    sage: import claasp
    sage: import os.path
    sage: tii_path = inspect.getfile(claasp)
    sage: tii_dir_path = os.path.dirname(tii_path)
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: identity = IdentityBlockCipher()
    sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    True
    sage: import os
    sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -845,50 +507,6 @@

-generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed on a terminal or on a file.

    INPUT:

    • avalanche_results – dictionary; results of the avalanche tests
    • difference_positions – list (default: None); positions of the differences to inject.
      The default value is equivalent to picking one of the worst positions for a difference and the average value.
    • criterion_names – list (default: None); names of the criteria to observe.
      The default value is equivalent to picking all of the 4 criteria:
      - "avalanche_dependence_vectors"
      - "avalanche_dependence_uniform_vectors"
      - "avalanche_entropy_vectors"
      - "avalanche_weight_vectors"

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    sage: d = sp.diffusion_tests(number_of_samples=100)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    sage: h[:20]
    '\documentclass[12pt]'

    sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    sage: ascon = AsconPermutation(number_of_rounds=4)
    sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

    sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    sage: cipher = XoodooPermutation(number_of_rounds=4)
    sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1154,43 +772,6 @@


    make_file_name()
-neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs – integer (default: 10); how long the neural network is trained

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random

-neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

    INPUT:

    • nb_samples – integer (default: 10000); how many samples the neural network is trained with
    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network
    • number_of_epochs – integer (default: 10); how long the neural network is trained
    • diff – list (default: [0x01]); list of input differences

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
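A hedged usage sketch for the two methods above, with deliberately small, illustrative parameter values so that a test run finishes quickly; the accuracies depend on training randomness, so no output is asserted:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=3)
    sage: blackbox = cipher.neural_network_blackbox_distinguisher_tests( # doctest: +SKIP
    ....:     nb_samples=1000, hidden_layers=[32, 32], number_of_epochs=1)
    sage: differential = cipher.neural_network_differential_distinguisher_tests( # doctest: +SKIP
    ....:     nb_samples=1000, hidden_layers=[32, 32], number_of_epochs=1)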
    property number_of_rounds
    @@ -1347,24 +928,6 @@

-print_component_analysis_as_radar_charts(component_analysis_results)

    Return a matplotlib object containing the radar charts of the components analysis test.

    INPUT:

    • component_analysis_results – list; results of the component analysis method

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: fig = aes.print_component_analysis_as_radar_charts(result)
    sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1509,38 +1072,6 @@


    property rounds_as_list
-run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)

    Runs the AutoND pipeline ([BGHR2023]):

    • Find an input difference for the inputs set to True in difference_positions using an optimizer
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing.

    INPUT:

    • difference_positions – list of booleans (default: `True in the plaintext position, False in the other positions`). If specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime.
    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime.
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 40); number of training epochs
    • verbose – boolean (default: False); verbosity of the optimizer

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
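For quick experiments the documented keyword arguments can be reduced; a sketch with arbitrary small values chosen only for illustration (not taken from the library's own examples, and skipped here because training is slow and stochastic):

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline( # doctest: +SKIP
    ....:     optimizer_samples=10**3, optimizer_generations=5,
    ....:     training_samples=10**5, testing_samples=10**4,
    ....:     number_of_epochs=1, verbose=False)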
    set_file_name(file_name)
    @@ -1617,70 +1148,16 @@

-train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr's RESNet ([Go2019]).

    INPUT:

    • input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher.
    • number_of_rounds – integer; number of rounds to analyze
    • depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper
    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • number_of_epochs – integer (default: 200); number of training epochs

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

-train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
    • starting_round – integer; number of rounds to analyze
    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests.
    • training_samples – integer (default: 10**7); number of samples used for training
    • testing_samples – integer (default: 10**6); number of samples used for testing
    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.
    • verbose – boolean (default: False); verbosity

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    -
    property type
diff --git a/docs/build/html/ciphers/stream_ciphers/a5_2_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/a5_2_stream_cipher.html
new file
index 00000000..b5b53b89
--- /dev/null
+++ b/docs/build/html/ciphers/stream_ciphers/a5_2_stream_cipher.html
@@ -0,0 +1,1324 @@
+A5 2 stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    +
    +
    +
    + +
    +

    A5 2 stream cipher

    +
    +
    +class A52StreamCipher(key_bit_size=64, frame_bit_size=22, number_of_normal_clocks_at_initialization=100, number_of_rounds=228)
    +

    Bases: Cipher

    +

    Construct an instance of the A52StreamCipher class.

    +

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    +

    INPUT:

    +
    +
      +
    • key_bit_size – integer (default: 64); bit size of the cipher key
    • number_of_rounds – integer (default: 228); number of rounds of the cipher

    • +
    +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.stream_ciphers.a5_2_stream_cipher import A52StreamCipher
    +sage: a52 = A52StreamCipher()
    +sage: a52.number_of_rounds
    +229
    +
    +sage: a52.component_from(0, 0).id
    +'constant_0_0'
    +
    +sage: a52.component_from(1, 0).id
    +'fsr_1_0'
    +
    +sage: key = 0x003fffffffffffff
    +sage: frame = 0b1000010000000000000000
    +sage: keystream = 0xf4512cac13593764460b722dadd51200350ca385a853735ee5c889944
    +sage: a52.evaluate([key, frame]) == keystream
    +True
    +
    +
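The same test vector can also be checked through the generic test_vector_check interface documented further down this page; since evaluate([key, frame]) equals the keystream above, the check is expected to return True (this snippet reuses the variables from the example just shown):

    sage: a52.test_vector_check([[key, frame]], [keystream])
    True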
    +
    +
    +add_AND_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_FSR_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_MODADD_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_MODSUB_component(input_id_links, input_bit_positions, output_bit_size, modulus=None)
    +
    + +
    +
    +add_NOT_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_OR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_SBOX_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_SHIFT_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_XOR_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_cipher_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_concatenate_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_constant_component(output_bit_size, value)
    +
    + +
    +
    +add_intermediate_output_component(input_id_links, input_bit_positions, output_bit_size, output_tag)
    +
    + +
    +
    +add_linear_layer_component(input_id_links, input_bit_positions, output_bit_size, description)
    +
    + +
    +
    +add_mix_column_component(input_id_links, input_bit_positions, output_bit_size, mix_column_description)
    +
    + +
    +
    +add_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description)
    +
    + +
    +
    +add_reverse_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_round()
    +
    + +
    +
    +add_round_key_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_round_output_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_shift_rows_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_sigma_component(input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    +
    + +
    +
    +add_suffix_to_components(suffix, component_id_list=None)
    +
    + +
    +
    +add_theta_keccak_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_theta_xoodoo_component(input_id_links, input_bit_positions, output_bit_size)
    +
    + +
    +
    +add_variable_rotate_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_variable_shift_component(input_id_links, input_bit_positions, output_bit_size, parameter)
    +
    + +
    +
    +add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    +
    + +
    +
    +as_python_dictionary()
    +
    + +
    +
    +cipher_inverse()
    +

    Return the graph representation of the inverse of the cipher under analysis

    +

    EXAMPLE:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: cipher = SpeckBlockCipher(number_of_rounds=2)
    +sage: ciphertext = cipher.evaluate([plaintext, key])
    +sage: cipher_inv = cipher.cipher_inverse()
    +sage: cipher_inv.evaluate([ciphertext, key]) == plaintext
    +True
    +
    +
    +
    + +
    +
    +cipher_partial_inverse(start_round=None, end_round=None, keep_key_schedule=False)
    +

    Returns the inverted portion of a cipher.

    +

    INPUT:

    +
      +
    • start_roundinteger; initial round number of the partial cipher

    • +
    • end_roundinteger; final round number of the partial cipher

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: key = 0xabcdef01abcdef01
    +sage: plaintext = 0x01234567
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: result = speck.evaluate([plaintext, key], intermediate_output=True)
    +sage: partial_speck = speck.cipher_partial_inverse(1, 2)
    +sage: partial_speck.evaluate([result[0], key]) == result[2]['intermediate_output_0_6'][0]
    +
    +
    +
    + +
    +
    +component_from(round_number, index)
    +
    + +
    +
    +convert_to_compound_xor_cipher()
    +
    + +
    +
    +create_networx_graph_from_input_ids()
    +
    + +
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    +
    + +
    +
    +property current_round
    +
    + +
    +
    +property current_round_number
    +
    + +
    +
    +property current_round_number_of_components
    +
    + +
    +
    +delete_generated_evaluate_c_shared_library()
    +

    Delete the file named <id_cipher>_evaluate.c and the corresponding executable.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().delete_generated_evaluate_c_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +evaluate(cipher_input, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist; block cipher inputs

    • +
    • intermediate_outputboolean (default: False); set this flag to True to return a dictionary with +each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True to print the input/output of each +component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().evaluate([0x01234567,0x89ABCDEF])
    +19088743
    +
    +
    +
    + +
    +
    +evaluate_using_c(inputs, intermediate_output=False, verbosity=False)
    +

    Return the output of the cipher.

    +

    INPUT:

    +
      +
    • inputs

    • +
    • intermediate_outputboolean (default: False); Set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); Set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy(number_of_rounds=2).evaluate_using_c([0x012345,0x89ABCD], True) # random
    +{'round_key_output': [3502917, 73728],
    + 'round_output': [9834215],
    + 'cipher_output': [7457252]}
    +
    +
    +
    + +
    +
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)
    +

    Return the output of the cipher for multiple inputs.

    +

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, +and cipher_inputs[1] the second. +Each of the inputs is given as a numpy ndarray of np.uint8, of shape n*m, where n is the size +(in bytes) of the input, and m is the number of samples.

    +

    The return is a list of m*n ndarrays (format transposed compared to the input format), +where the list is of size 1 if intermediate_output is False, and NUMBER_OF_ROUNDS otherwise.

    +

    This function determines automatically if a bit-based evaluation is required, +and does the transformation transparently. The inputs and outputs are similar to evaluate_vectorized_byte.

    +

    INPUT:

    +
      +
    • cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)
    • intermediate_output – boolean (default: False)
    • verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.

    EXAMPLES:

    +
    sage: import numpy as np
    +sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    +sage: K=np.random.randint(256, size=(8,2), dtype=np.uint8)
    +sage: X=np.random.randint(256, size=(4,2), dtype=np.uint8)
    +sage: result=speck.evaluate_vectorized([X, K])
    +sage: K0Lib=int.from_bytes(K[:,0].tobytes(), byteorder='big')
    +sage: K1Lib=int.from_bytes(K[:,1].tobytes(), byteorder='big')
    +sage: X0Lib=int.from_bytes(X[:,0].tobytes(), byteorder='big')
    +sage: X1Lib=int.from_bytes(X[:,1].tobytes(), byteorder='big')
    +sage: C0Lib=speck.evaluate([X0Lib, K0Lib])
    +sage: C1Lib=speck.evaluate([X1Lib, K1Lib])
    +sage: int.from_bytes(result[-1][0].tobytes(), byteorder='big') == C0Lib
    +True
    +sage: int.from_bytes(result[-1][1].tobytes(), byteorder='big') == C1Lib
    +True
    +
    +
    +
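A shorter sketch of the evaluate_api path described in the INPUT section; the plaintext/key values are taken from the Speck test vectors used elsewhere in this documentation, and the equality is exactly the property stated above (speck is the 22-round instance from the previous example):

    sage: plaintext, key = 0x6574694c, 0x1918111009080100
    sage: speck.evaluate_vectorized([plaintext, key], evaluate_api=True) == speck.evaluate([plaintext, key])
    True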
    + +
    +
    +evaluate_with_intermediate_outputs_continuous_diffusion_analysis(cipher_input, sbox_precomputations, sbox_precomputations_mix_columns, verbosity=False)
    +

    Return the output of the continuous generalized cipher.

    +

    INPUT:

    +
      +
    • cipher_inputlist of Decimal; block cipher input message

    • +
    • sbox_precomputations dictionary

    • +
    • sbox_precomputations_mix_columns dictionary

    • +
    • verbosityboolean (default: False); set this flag to True in order to print the input/output of +each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: from decimal import *
    +sage: plaintext_input = [Decimal('1') for i in range(32)]
    +sage: plaintext_input[10] = Decimal('0.802999073954890452142763024312444031238555908203125')
    +sage: key_input = [Decimal('-1') for i in range(64)]
    +sage: cipher_inputs = [plaintext_input, key_input]
    +sage: output = speck(number_of_rounds=2).evaluate_with_intermediate_outputs_continuous_diffusion_analysis(
    +....:     cipher_inputs,
    +....:     {},
    +....:     {}
    +....: )
    +sage: output[0][0] == Decimal('-1.000000000')
    +True
    +
    +
    +
    + +
    +
    +property family_name
    +
    + +
    +
    +property file_name
    +
    + +
    +
    +find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    +

    From [SGLYTQH2017]: finds impossible differentials or zero-correlation linear approximations (based on type) by iteratively fixing the input and output to all possible Hamming weight 1 values and asking the solver to find a solution; if none is found, then the propagation is impossible.
    Return a list of impossible differentials or zero-correlation linear approximations if there are any; otherwise return an empty list.

    INPUT:

    • type – string; {"differential", "linear"}: the type of property to search for
    • technique – string; {"sat", "smt", "milp", "cp"}: the technique to use for the search
    • solver – string; the name of the solver to use for the search
    • scenario – string; the type of property to search, single-key or related-key
    + +
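A usage sketch based on the signature above; the round count and solver choice are arbitrary illustrations, and the call is skipped because it invokes an external solver:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=3)
    sage: results = cipher.find_impossible_property("differential", technique="sat", solver="kissat") # doctest: +SKIP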
    +
    +generate_bit_based_c_code(intermediate_output=False, verbosity=False)
    +

    Return a string containing the C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: s = fancy().generate_bit_based_c_code()
    +sage: s[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    +

    Store the C code in a file named <id_cipher>_evaluate.c, and build the corresponding executable.

    +

    INPUT:

    +
      +
    • intermediate_outputboolean (default: False); set this flag to True in order to make the C code +print a dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the C code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher as fancy
    +sage: fancy().generate_evaluate_c_code_shared_library() # doctest: +SKIP
    +
    +
    +
    + +
    +
    +generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    +

    Return a string containing the optimized C code that defines the self.evaluate() method.

    +

    INPUT:

    +
      +
    • word_sizeinteger; the size of the word

    • +
    • intermediate_outputboolean (default: False); set this flag to True in order to return a +dictionary with each intermediate output

    • +
    • verbosityboolean (default: False); set this flag to True in order to make the code print the +input/output of each component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: word_based_c_code = speck().generate_word_based_c_code(20)
    +sage: word_based_c_code[:8] == '#include'
    +True
    +
    +
    +
    + +
    +
    +get_all_components()
    +
    + +
    +
    +get_all_components_ids()
    +
    + +
    +
    +get_all_inputs_bit_positions()
    +
    + +
    +
    +get_component_from_id(component_id)
    +

    Return the component according to the id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: component = fancy.get_component_from_id('sbox_0_0')
    +sage: component.description
    +[0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
    +
    +
    +
    + +
    +
    +get_components_in_round(round_number)
    +
    + +
    +
    +get_current_component_id()
    +

    Use this function to get the current component id.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [4], 4)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(4, 0xF)
    +sage: constant_0_1 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.add_round()
    +sage: constant_1_0 = cipher.add_constant_component(4, 0xF)
    +sage: cipher.get_current_component_id()
    +'constant_1_0'
    +
    +
    +
    + +
    +
    +get_model(technique, problem)
    +

    Returns a model for a given technique and problem.

    +

    INPUT:

    +
    +
      +
    • technique – string; sat, smt, milp or cp
    • problem – string; xor_differential, xor_linear, cipher_model (more to be added as more model types are added to the library)

    • +
    +
    +
    + +
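A usage sketch of the documented call; the technique/problem pair is one of the combinations listed above, and the concrete class of the returned model object is not asserted here:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher(number_of_rounds=4)
    sage: model = cipher.get_model('sat', 'xor_differential') # doctest: +SKIP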
    +
    +get_number_of_components_in_round(round_number)
    +
    + +
    +
    +get_partial_cipher(start_round=None, end_round=None, keep_key_schedule=True)
    +
    + +
    +
    +get_round_from_component_id(component_id)
    +

    Return the round according to the round of the component id given as input.

    +

    INPUT:

    +
      +
    • id_componentstring; id of a component

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=2)
    +sage: fancy.get_round_from_component_id('xor_1_14')
    +1
    +
    +
    +
    + +
    +
    +get_sizes_of_components_by_type()
    +
    + +
    +
    +property id
    +
    + +
    + +

    Return a list of impossible differentials if there are any; otherwise return an empty list.

    INPUT:

    • technique – string; {"sat", "smt", "milp", "cp"}: the technique to use for the search
    • solver – string; the name of the solver to use for the search
    • scenario – string; the type of impossible differentials to search, single-key or related-key
    + +
    +
    +property inputs
    +
    + +
    +
    +property inputs_bit_size
    +
    + +
    +
    +inputs_size_to_dict()
    +
    + +
    +
    +is_algebraically_secure(timeout)
    +

    Return True if the cipher is resistant against algebraic attack.

    +

    INPUT:

    +
      +
    • timeout – integer; the timeout for the Grobner basis computation in seconds

    • +
    +
    + +
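A usage sketch based on the signature above; the timeout value is an arbitrary illustration and the boolean outcome depends on the Grobner basis computation, so it is not asserted:

    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    sage: IdentityBlockCipher().is_algebraically_secure(30) # doctest: +SKIP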
    +
    +is_andrx()
    +

    Return True if the cipher is AndRX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_andrx()
    +False
    +
    +
    +
    + +
    +
    +is_arx()
    +

    Return True if the cipher is ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=20)
    +sage: midori.is_arx()
    +False
    +
    +
    +
    + +
    +
    +is_power_of_2_word_based()
    +

    Return the word size if the cipher is word based (64, 32, 16 or 8 bits), False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: XTeaBlockCipher(number_of_rounds=32).is_power_of_2_word_based()
    +32
    +sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: MidoriBlockCipher(number_of_rounds=16).is_power_of_2_word_based()
    +False
    +
    +
    +
    + +
    +
    +is_shift_arx()
    +

    Return True if the cipher is Shift-ARX, False otherwise.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher
    +sage: xtea = XTeaBlockCipher(number_of_rounds=32)
    +sage: xtea.is_shift_arx()
    +True
    +
    +
    +
    + +
    +
    +is_spn()
    +

    Return True if the cipher is SPN.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    +sage: aes = AESBlockCipher(number_of_rounds=2)
    +sage: aes.is_spn()
    +True
    +
    +
    +
    + +
    +
    +make_cipher_id()
    +
    + +
    +
    +make_file_name()
    +
    + +
    +
    +property number_of_rounds
    +
    + +
    +
    +property output_bit_size
    +
    + +
    +
    +polynomial_system()
    +

    Return a polynomial system for the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    +sage: IdentityBlockCipher().polynomial_system()
    +Polynomial Sequence with 128 Polynomials in 256 Variables
    +
    +
    +
    + +
    +
    +polynomial_system_at_round(r)
    +

    Return a polynomial system for the cipher at round r.

    +

    INPUT:

    +
      +
    • rinteger; round index

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: FancyBlockCipher(number_of_rounds=1).polynomial_system_at_round(0)
    +Polynomial Sequence with 252 Polynomials in 288 Variables
    +
    +
    +
    + +
    +
    +print()
    +

    Print the structure of the cipher into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print()
    +cipher_id = cipher_name_i32_o32_r1
    +cipher_type = permutation
    +cipher_inputs = ['input']
    +cipher_inputs_bit_size = [32]
    +cipher_output_bit_size = 32
    +cipher_number_of_rounds = 1
    +
    +    # round = 0 - round component = 0
    +    id = constant_0_0
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +
    +    # round = 0 - round component = 1
    +    id = constant_0_1
    +    type = constant
    +    input_bit_size = 0
    +    input_id_link = ['']
    +    input_bit_positions = [[]]
    +    output_bit_size = 16
    +    description = ['0xab01']
    +cipher_reference_code = None
    +
    +
    +
    + +
    +
    +print_as_python_dictionary()
    +

    Use this function to print the cipher as a python dictionary into the sage terminal.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.add_round()
    +sage: constant_0_0 = cipher.add_constant_component(16, 0xAB01)
    +sage: constant_0_1 = cipher.add_constant_component(16, 0xAB01)
    +sage: cipher.print_as_python_dictionary()
    +cipher = {
    +'cipher_id': 'cipher_name_k32_p32_o32_r1',
    +'cipher_type': 'block_cipher',
    +'cipher_inputs': ['key', 'plaintext'],
    +'cipher_inputs_bit_size': [32, 32],
    +'cipher_output_bit_size': 32,
    +'cipher_number_of_rounds': 1,
    +'cipher_rounds' : [
    +  # round 0
    +  [
    +  {
    +    # round = 0 - round component = 0
    +    'id': 'constant_0_0',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  {
    +    # round = 0 - round component = 1
    +    'id': 'constant_0_1',
    +    'type': 'constant',
    +    'input_bit_size': 0,
    +    'input_id_link': [''],
    +    'input_bit_positions': [[]],
    +    'output_bit_size': 16,
    +    'description': ['0xab01'],
    +  },
    +  ],
    +  ],
    +'cipher_reference_code': None,
    +}
    +
    +
    +
    + +
    +
    +print_as_python_dictionary_to_file(file_name='')
    +

    Use this function to print the cipher as a python dictionary to a file.

    +

    INPUT:

    +
      +
    • file_namestring; a python string representing a valid file name

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "block_cipher", ["key", "plaintext"], [32, 32], 32)
    +sage: cipher.print_as_python_dictionary_to_file("claasp/ciphers/dictionary_example.py")
    +sage: os.remove("claasp/ciphers/dictionary_example.py")
    +
    +
    +
    + +
    +
    +print_evaluation_python_code(verbosity=False)
    +

    Print the python code that implement the evaluation function of the cipher.

    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity().print_evaluation_python_code() # random
    +from copy import copy
    +from bitstring import BitArray
    +from claasp.cipher_modules.generic_functions import *
    +
    +def evaluate(input):
    +    plaintext_output = copy(BitArray(uint=input[0], length=32))
    +    key_output = copy(BitArray(uint=input[1], length=32))
    +    intermediate_output = {}
    +    intermediate_output['cipher_output'] = []
    +    intermediate_output['round_key_output'] = []
    +    components_io = {}
    +    component_input = BitArray(1)
    +
    +    # round: 0, component: 0, component_id: concatenate_0_0
    +    component_input = select_bits(key_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_0_output = component_input
    +    components_io['concatenate_0_0'] = [component_input.uint, concatenate_0_0_output.uint]
    +
    +    # round: 0, component: 1, component_id: intermediate_output_0_1
    +    component_input = select_bits(concatenate_0_0_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    intermediate_output_0_1_output = component_input
    +    intermediate_output['round_key_output'].append(intermediate_output_0_1_output.uint)
    +    components_io['intermediate_output_0_1'] = [component_input.uint, intermediate_output_0_1_output.uint]
    +
    +    # round: 0, component: 2, component_id: concatenate_0_2
    +    component_input = select_bits(plaintext_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    concatenate_0_2_output = component_input
    +    components_io['concatenate_0_2'] = [component_input.uint, concatenate_0_2_output.uint]
    +
    +    # round: 0, component: 3, component_id: cipher_output_0_3
    +    component_input = select_bits(concatenate_0_2_output, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    +    output_bit_size = 32
    +    cipher_output_0_3_output = component_input
    +    intermediate_output['cipher_output'].append(cipher_output_0_3_output.uint)
    +    cipher_output = cipher_output_0_3_output.uint
    +    components_io['cipher_output_0_3'] = [component_input.uint, cipher_output_0_3_output.uint]
    +
    +    return cipher_output, intermediate_output, components_io
    +
    +
    +
    + +
    +
    +print_evaluation_python_code_to_file(file_name)
    +

    Use this function to print the python code to a file.

    +

    INPUT:

    +
      +
    • file_namestring; name of the output file

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher as identity
    +sage: identity = identity()
    +sage: identity.file_name
    +'identity_block_cipher_p32_k32_o32_r1.py'
    +sage: identity.print_evaluation_python_code_to_file(identity.id + 'evaluation.py') # doctest: +SKIP
    +
    +
    +
    + +
    +
    +print_input_information()
    +

    Print a list of the inputs with their corresponding bit size.

    +
    +
    Possible cipher inputs are:
      +
    • plaintext

    • +
    • key

    • +
    • tweak

    • +
    • initialization vector

    • +
    • nonce

    • +
    • constant

    • +
    • etc.

    • +
    +
    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher()
    +sage: fancy.print_input_information()
    +plaintext of bit size 24
    +key of bit size 24
    +
    +
    +
    + +
    +
    +property reference_code
    +
    + +
    +
    +remove_key_schedule()
    +
    + +
    +
    +remove_round_component(round_id, component)
    +
    + +
    +
    +remove_round_component_from_id(round_id, component_id)
    +
    + +
    +
    +property rounds
    +
    + +
    +
    +property rounds_as_list
    +
    + +
    +
    +set_file_name(file_name)
    +
    + +
    +
    +set_id(cipher_id)
    +
    + +
    +
    +set_inputs(inputs_ids_list, inputs_bit_size_list)
    +
    + +
    +
    +sort_cipher()
    +
    + +
    +
    +test_against_reference_code(number_of_tests=5)
    +

    Test the graph representation against its reference implementation (if available) with random inputs.

    +

    INPUT:

    +
      +
    • number_of_testsinteger (default: 5); number of tests to execute

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.xtea_block_cipher import XTeaBlockCipher as xtea
    +sage: xtea(number_of_rounds=32).test_against_reference_code()
    +True
    +
    +
    +
    + +
    +
    +test_vector_check(list_of_test_vectors_input, list_of_test_vectors_output)
    +

    Testing the cipher with list of test vectors input and list of test vectors output.

    +

    INPUT:

    +
      +
    • list_of_test_vectors_inputlist; list of input testing vectors

    • +
    • list_of_test_vectors_outputlist; list of the expected output of the corresponding input testing +vectors. That is, list_of_test_vectors_output[i] = cipher.evaluate(list_of_test_vectors_input[i])

    • +
    +

    OUTPUT:

    +
      +
    • test_result – output of the testing. True if all the cipher.evaluate(input)=output for every input

    • +
    +

    test vectors, and False, otherwise.

    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    +sage: speck = speck(number_of_rounds=22)
    +sage: key1 = 0x1918111009080100
    +sage: plaintext1 = 0x6574694c
    +sage: ciphertext1 = 0xa86842f2
    +sage: key2 = 0x1918111009080100
    +sage: plaintext2 = 0x6574694d
    +sage: ciphertext2 = 0x2b5f25d6
    +sage: input_list=[[plaintext1, key1], [plaintext2, key2]]
    +sage: output_list=[ciphertext1, ciphertext2]
    +sage: speck.test_vector_check(input_list, output_list)
    +True
    +sage: input_list.append([0x11111111, 0x1111111111111111])
    +sage: output_list.append(0xFFFFFFFF)
    +sage: speck.test_vector_check(input_list, output_list)
    +Testing Failed
    +index: 2
    +input:  [286331153, 1229782938247303441]
    +output:  4294967295
    +False
    +
    +
    +
    + +
    +
    +property type
    +
    + +
    + +
    + +
    + +

    Return a list of zero-correlation linear approximations if there are any; otherwise return an empty list.

    INPUT:

    • technique – string; {"sat", "smt", "milp", "cp"}: the technique to use for the search
    • solver – string; the name of the solver to use for the search

    • +
    +
    + +
    + +
diff --git a/docs/build/html/ciphers/stream_ciphers/bivium_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/bivium_stream_cipher.html
index de171d75..c917e74f 100644
--- a/docs/build/html/ciphers/stream_ciphers/bivium_stream_cipher.html
+++ b/docs/build/html/ciphers/stream_ciphers/bivium_stream_cipher.html
@@ -1,23 +1,24 @@
-Bivium stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Bivium stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation


    -

    Bivium stream cipher

    +

    Bivium stream cipher

    class BiviumStreamCipher(iv_bit_size=80, key_bit_size=80, state_bit_size=177, number_of_initialization_clocks=708, keystream_bit_len=256)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of Bivium Stream Cipher.

    @@ -226,94 +227,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
-algebraic_tests(timeout)

    Return a dictionary explaining the result of the algebraic test.

    INPUT:

    • timeout – integer; the timeout for the Grobner basis computation in seconds

    OUTPUT: a dictionary with the following keys:

    • npolynomials – number of polynomials
    • nvariables – number of variables
    • timeout – timeout in seconds
    • pass – whether the algebraic test passed w.r.t. the given timeout

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    sage: d = speck.algebraic_tests(5)  # long time
    sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    ....: {'number_of_variables': [304, 800],
    ....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    ....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    True

-analyze_cipher(tests_configuration)

    Generate a dictionary with the analysis of the cipher.

    The analysis is related to the following tests:

    • Diffusion Tests

    INPUT:

    • tests_configuration – python dictionary

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    ....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    ....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    ....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    ....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    ....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    sage: analysis = sp.analyze_cipher(tests_configuration)
    sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    ....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    as_python_dictionary()
-avalanche_probability_vectors(nb_samples)

    Return the avalanche probability vectors of each input bit difference for each round.

    The inputs considered are plaintext, key, etc.

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    Note

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    INPUT:

    • nb_samples – integer; used to compute the estimated probability of flipping

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    sage: apvs = speck.avalanche_probability_vectors(100)
    sage: apvs["key"]["round_output"][31][0] # random
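Formally, writing n for the output bit size of the round under analysis and \Delta for the injected input bit difference, the estimate computed from nb_samples evaluations can be stated as (an editorial restatement of the sentence above, not a formula taken from the library):

    \mathrm{apv}_i(\Delta) \approx \frac{1}{\text{nb\_samples}} \sum_{k=1}^{\text{nb\_samples}} \mathbf{1}\left[\text{output bit } i \text{ flips in sample } k\right], \qquad i = 0, \dots, n-1.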
    -
    -
    bivium_key_stream(state, clock_number, ks)
    @@ -362,185 +280,24 @@


-component_analysis_tests()

    Return a list of dictionaries, each one giving some properties of the cipher's operations.

    INPUT:

    • None

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    sage: result = aes.component_analysis_tests()
    sage: len(result)
    9
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
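As a reading aid for the layout described in the note above, the following is a minimal sketch, assuming the key names shown in the example and that each criterion vector is a list of per-bit counts; it is not taken verbatim from the library:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
sage: vector = d["key"]["round_output"][0][0]["avalanche_dependence_vectors"]  # random
sage: sum(vector)  # assumed: total number of output bits counted as flipped for this input difference  # random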
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
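A short follow-up sketch, assuming the dictionary layout used in the example above, that checks one of the metrics named in the OUTPUT description is present in the result:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck_cipher = speck(number_of_rounds=1)  # long time
sage: output = speck_cipher.continuous_diffusion_tests()  # long time
sage: 'continuous_neutrality_measure' in output['plaintext']['round_key_output']  # long time
True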
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -573,53 +330,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
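A hypothetical post-processing sketch, assuming the nesting of keys shown in the example above, that retrieves the avalanche dependence vectors produced for one input difference; the per-round interpretation is an assumption and the threshold comparison described in the INPUT list is left to the reader:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100)
sage: vectors = d["test_results"]["key"]["round_output"]["avalanche_dependence_vectors"]["differences"][0]["output_vectors"]  # random
sage: len(vectors) > 0  # assumed: one entry per analysed round
True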
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -664,7 +374,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
@@ -678,11 +388,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.
EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -743,28 +456,6 @@ 
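To illustrate the evaluate_api flag described above, a minimal sketch, assuming only the equivalence with evaluate stated in the parameter description; the test values are illustrative:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
sage: plaintext, key = 0x6574694c, 0x1918111009080100
sage: speck.evaluate([plaintext, key]) == speck.evaluate_vectorized([plaintext, key], evaluate_api=True)  # assumed equivalence, see above
True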

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
• nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -800,35 +491,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
• output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -847,50 +509,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all of the 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
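Since the method returns a LaTeX string, one way to persist it is to write it to a file before compiling; a minimal sketch, assuming only standard Python file handling, with an illustrative file name:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: with open("avalanche_heatmaps.tex", "w") as f:  # illustrative file name
....:     _ = f.write(h)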
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1156,43 +774,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1349,24 +930,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1501,38 +1064,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer; (default: 50) number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• number_of_epochs – integer; (default: 40) number of training epochs
• verbose – boolean; (default: False) verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
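A reduced-cost invocation sketch using the parameters listed above; the values are chosen purely for illustration and are not recommendations:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline(optimizer_samples=1000, optimizer_generations=5,  # doctest: +SKIP
....:     training_samples=10**5, testing_samples=10**4, number_of_epochs=1, verbose=False)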
    -
    set_file_name(file_name)
    @@ -1609,70 +1140,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• number_of_epochs – integer; (default: 200) number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• pipeline – boolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant
• verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1697,13 +1174,13 @@

    Navigation

    This Page

    @@ -1721,7 +1198,7 @@

    Quick search

    - +
    @@ -1736,10 +1213,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1747,7 +1224,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/stream_ciphers/bluetooth_stream_cipher_e0.html b/docs/build/html/ciphers/stream_ciphers/bluetooth_stream_cipher_e0.html index c4c084fe..aa1fc0ed 100644 --- a/docs/build/html/ciphers/stream_ciphers/bluetooth_stream_cipher_e0.html +++ b/docs/build/html/ciphers/stream_ciphers/bluetooth_stream_cipher_e0.html @@ -1,23 +1,24 @@ - + - Bluetooth stream cipher e0 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Bluetooth stream cipher e0 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Bluetooth stream cipher e0

    +

    Bluetooth stream cipher e0

    class BluetoothStreamCipherE0(key_bit_size=128, lfsr_state_bit_size=128, fsm_bit_size=4, keystream_bit_len=125)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of bluetooth encryption/decryption keystream generator E0.

For details, one can follow https://eprint.iacr.org/2022/016.pdf

    The Bluetooth E0 algorithm comprises two distinct stages. In the first stage, an @@ -234,94 +235,11 @@
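A minimal instantiation sketch, assuming the module path mirrors the documentation tree and using the defaults shown in the class signature above:

sage: from claasp.ciphers.stream_ciphers.bluetooth_stream_cipher_e0 import BluetoothStreamCipherE0
sage: e0 = BluetoothStreamCipherE0(key_bit_size=128, keystream_bit_len=125)
sage: e0.number_of_rounds  # random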

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
• pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
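A short follow-up sketch, reusing the keys shown in the example output above, to read back the per-round pass/fail flags:

sage: d['test_results']['test_passed']  # long time
[False, False]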
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
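As a reading aid, a minimal sketch assuming only the description above, namely that each entry of an avalanche probability vector is an estimated flip probability and therefore lies between 0 and 1:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: apvs = speck.avalanche_probability_vectors(100)
sage: all(0 <= p <= 1 for p in apvs["key"]["round_output"][31][0])  # assumed: entries are estimated probabilities
True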
    -
    -
    -
    cipher_inverse()
    @@ -360,185 +278,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    -

The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -571,53 +328,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    e0_keystream(lfsr_state, fsm_id, fsm_pos, clock_number, ks)
    @@ -672,7 +382,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
@@ -686,11 +396,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True.
EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -751,28 +464,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
• nb_samples – integer (default: 10000); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -808,35 +499,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

This method generates a CSV report containing the criteria presented in the paper “The design of Xoodoo and Xoofff” [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
• output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -855,50 +517,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

The default value is equivalent to picking all of the 4 criteria: “avalanche_dependence_vectors”, “avalanche_dependence_uniform_vectors”, “avalanche_entropy_vectors”, “avalanche_weight_vectors”.

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1164,43 +782,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
• nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
• number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1357,24 +938,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1509,38 +1072,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.
• optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime
• optimizer_generations – integer; (default: 50) number of generations used by the optimizer; higher values increase the runtime
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• number_of_epochs – integer; (default: 40) number of training epochs
• verbose – boolean; (default: False) verbosity of the optimizer

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1617,70 +1148,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• number_of_epochs – integer; (default: 200) number of training epochs

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

and a number of samples, and returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.
• starting_round – integer; number of rounds to analyze
• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests
• training_samples – integer; (default: 10**7) number of samples used for training
• testing_samples – integer; (default: 10**6) number of samples used for testing
• pipeline – boolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant
• verbose – boolean (default: False); verbosity

    -

EXAMPLES:
sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1705,13 +1182,13 @@

    Navigation

    This Page

    @@ -1729,7 +1206,7 @@

    Quick search

    - +
    @@ -1744,10 +1221,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -1755,7 +1232,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/stream_ciphers/chacha_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/chacha_stream_cipher.html index d11ea36c..2ce55372 100644 --- a/docs/build/html/ciphers/stream_ciphers/chacha_stream_cipher.html +++ b/docs/build/html/ciphers/stream_ciphers/chacha_stream_cipher.html @@ -1,23 +1,24 @@ - + - Chacha stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Chacha stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Chacha stream cipher

    +

    Chacha stream cipher

    class ChachaStreamCipher(block_bit_size=512, key_bit_size=256, number_of_rounds=20, block_count=1, chacha_constants=129519094746312487908866675886161683828)

    -Bases: claasp.ciphers.permutations.chacha_permutation.ChachaPermutation
    +Bases: ChachaPermutation

    Construct an instance of the ChachaStreamCipher class.

    This class is used to store compact representations of a cipher, used to generate the corresponding cipher.

    INPUT:

    @@ -225,94 +226,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    bottom_half_quarter_round(a, b, c, d, state)
    @@ -356,185 +274,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference,
    for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with
    respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in
    $\left[\frac{1}{2} - \text{bias},\ \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference,
    for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size
    with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit
    difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are
    close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the
    avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input
    bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain
    round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the
    strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -567,53 +324,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -658,7 +368,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
    @@ -672,11 +382,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
    is True.
    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
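    The equivalence stated above between the two evaluation APIs can be sketched as follows. This is an illustrative sketch rather than part of the documented example; the plaintext/key values are arbitrary illustrative inputs.

    from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    plaintext, key = 0x6574694c, 0x1918111009080100

    # integer-based evaluation
    reference = speck.evaluate([plaintext, key])

    # vectorized evaluation driven through the integer API; per the documentation
    # above, the two results are expected to match
    vectorized = speck.evaluate_vectorized([plaintext, key], evaluate_api=True)
    assert reference == vectorized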
    @@ -737,28 +450,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -794,35 +485,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    “The design of Xoodoo and Xoofff” [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -841,50 +503,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests.
    The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all 4 criteria:
      - “avalanche_dependence_vectors”
      - “avalanche_dependence_uniform_vectors”
      - “avalanche_entropy_vectors”
      - “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1150,43 +768,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples -- integer (default: 10000); how many samples the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1343,24 +924,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1495,38 +1058,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, it must have the same length as self.inputs_bit_size and contain one boolean per
    input position. The optimizer will look for input differences in the positions set to True; by default,
    the single-key case will be run.
    • optimizer_samples -- integer; number of samples used by the optimizer; higher values increase the
    quality of the optimizer, at the cost of a longer runtime
    • optimizer_generations -- integer (default: 50); number of generations used by the optimizer;
    higher values increase the runtime
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • number_of_epochs -- integer (default: 40); number of training epochs
    • verbose -- boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
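    For clarity on the difference_positions convention described above, the following sketch builds the single-key setting explicitly: differences allowed in the plaintext, none in the key. It assumes (hypothetically) that cipher.inputs lists the cipher's input names, e.g. ['plaintext', 'key'], and uses small parameters purely to keep the run short.

    from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    cipher = SpeckBlockCipher()

    # one boolean per cipher input: True where input differences may be injected
    difference_positions = [name == 'plaintext' for name in cipher.inputs]

    results = cipher.run_autond_pipeline(difference_positions=difference_positions,
                                         optimizer_generations=5, number_of_epochs=1)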

    -
    -
    set_file_name(file_name)
    @@ -1608,70 +1139,16 @@

    Navigation

    top_half_quarter_round(a, b, c, d, state)
    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly
    modified (AMSGrad instead of a cyclic learning rate schedule) Gohr’s ResNet ([Go2019]) of depth depth.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • number_of_epochs -- integer (default: 40); number of training epochs

    -

    EXAMPLES::

    -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: input_differences = [0x400000, 0]
    -sage: number_of_rounds = 5
    -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    -Validation accuracy at 5 rounds :0.9101160168647766
    -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher on the data generated by the data_generator function, using the provided neural network, at round starting_round.
    If pipeline is set to True, the distinguisher is retrained for one more round as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector.
    To reproduce classical neural distinguisher results, one would use the example below.
    • starting_round -- integer; number of rounds to analyze
    • neural_network -- (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one
    returned by the get_neural_network function of neural_network_tests
    • training_samples -- integer (default: 10**7); number of samples used for training
    • testing_samples -- integer (default: 10**6); number of samples used for testing
    • pipeline -- boolean (default: True); if False, only trains for starting_round; if True, increments starting_round and retrains
    the model as long as the accuracy is statistically significant
    • verbose -- boolean (default: False); verbosity

    -

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
diff --git a/docs/build/html/ciphers/stream_ciphers/snow3g_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/snow3g_stream_cipher.html
index bf1ff6bf..f39dae35 100644
--- a/docs/build/html/ciphers/stream_ciphers/snow3g_stream_cipher.html
+++ b/docs/build/html/ciphers/stream_ciphers/snow3g_stream_cipher.html
@@ -1,23 +1,24 @@
-Snow3g stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Snow3g stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
@@ -33,10 +34,10 @@

Snow3g stream cipher

    class Snow3GStreamCipher(iv_bit_size=128, key_bit_size=128, number_of_initialization_clocks=32, keystream_word_size=2)

    -Bases: claasp.cipher.Cipher
    +Bases: Cipher

    Return a cipher object of SNOW3G stream cipher.

    INPUT:

    @@ -251,94 +252,11 @@

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that the i-th bit of the output flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -392,190 +310,29 @@

    Navigation

    clock_lfsr_initialization_mode(F, const_0)
    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference,
    for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with
    respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in
    $\left[\frac{1}{2} - \text{bias},\ \frac{1}{2} + \text{bias}\right]$, with respect to an input bit difference,
    for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size
    with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit
    difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are
    close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the
    avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input
    bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain
    round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the
    strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_alpha_state(const_0)
    -
    -convert_to_compound_xor_cipher()
    +
    +create_networx_graph_from_input_ids()
    -
    -create_alpha_state(const_0)
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -608,53 +365,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -699,7 +409,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.
    @@ -713,11 +423,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_apiboolean (default: False); if set to True, takes integer inputs (as the evaluate function)

    -

    EXAMPLES:

    +

    and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True)
    is True.
    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -778,28 +491,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -835,35 +526,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper
    “The design of Xoodoo and Xoofff” [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -882,50 +544,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1196,43 +814,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1389,24 +970,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1541,38 +1104,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()
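
    A hedged variant call (not shown on the original page) that restricts the optimizer to differences in the plaintext input only and uses deliberately small, hypothetical parameter values to keep the run short:

    sage: cipher.run_autond_pipeline(difference_positions=[True, False], number_of_epochs=1)  # doctest: +SKIP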

    -
    -
    set_file_name(file_name)
    @@ -1659,70 +1190,16 @@


    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a depth depth Gohr ResNet ([Go2019]), slightly modified to use AMSGrad instead of a cyclic learning rate schedule.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type

    diff --git a/docs/build/html/ciphers/stream_ciphers/trivium_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/trivium_stream_cipher.html
    index 9eb8c5c4..2748a641 100644
    --- a/docs/build/html/ciphers/stream_ciphers/trivium_stream_cipher.html
    +++ b/docs/build/html/ciphers/stream_ciphers/trivium_stream_cipher.html
    @@ -1,22 +1,23 @@
    - Trivium stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Trivium stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    @@ -56,11 +57,11 @@

    Trivium stream cipher

    class TriviumStreamCipher(iv_bit_size=80, key_bit_size=80, state_bit_size=288, number_of_initialization_clocks=1152, keystream_bit_len=512)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of Trivium Stream Cipher.

    INPUT:
      @@ -225,94 +226,11 @@


      add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
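    sage: # hedged follow-up, not part of the original page: individual entries of the
    sage: # returned dictionary shown above can be read out directly
    sage: d['test_results']['test_passed']  # long time
    [False, False]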
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -351,185 +269,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability \in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
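
    In code, each criterion reduces to a threshold check around its target value. The following is a hedged, illustrative sketch (not part of the original documentation; function and variable names are hypothetical):

    # Hedged sketch: the avalanche criteria described above written as explicit threshold tests.
    def satisfies_avalanche_dependence(worst_dependence, output_bit_size, threshold):
        # the worst-case number of flipped output bits should be close to the full output size
        return abs(worst_dependence - output_bit_size) <= threshold

    def satisfies_avalanche_weight(worst_weight, output_bit_size, threshold):
        # the expected Hamming weight of the output difference should be close to half the output size
        return abs(worst_weight - output_bit_size / 2) <= threshold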

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -562,53 +319,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -653,7 +363,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -667,11 +377,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs and returns integer outputs (as the evaluate function); it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
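    sage: # hedged continuation, not part of the original page: per the evaluate_api note
    sage: # above, the vectorized call is expected to agree with evaluate() on integer inputs
    sage: inputs = [0x6574694c, 0x1918111009080100]  # assumed input order: plaintext, key
    sage: speck.evaluate(inputs) == speck.evaluate_vectorized(inputs, evaluate_api=True)  # doctest: +SKIP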
    @@ -732,28 +445,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -789,35 +480,6 @@


    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
    [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -836,50 +498,6 @@


    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to picking one of the worst positions for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to picking all of the 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors".

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1150,43 +768,6 @@


    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained for

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); how long the neural network is trained for

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1343,24 +924,6 @@


    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1495,38 +1058,6 @@


    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, it must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1603,65 +1134,6 @@


    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a depth depth Gohr ResNet ([Go2019]), slightly modified to use AMSGrad instead of a cyclic learning rate schedule.

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher).

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    trivium_key_stream(state, clock_number, key_stream)
    @@ -1677,6 +1149,11 @@

    property type

    diff --git a/docs/build/html/ciphers/stream_ciphers/zuc_stream_cipher.html b/docs/build/html/ciphers/stream_ciphers/zuc_stream_cipher.html
    index bc0a3e2b..cf3c3112 100644
    --- a/docs/build/html/ciphers/stream_ciphers/zuc_stream_cipher.html
    +++ b/docs/build/html/ciphers/stream_ciphers/zuc_stream_cipher.html
    @@ -1,23 +1,24 @@
    - Zuc stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
    + Zuc stream cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation
    @@ -36,7 +37,7 @@

    @@ -56,11 +57,11 @@

    Zuc stream cipher

    class ZucStreamCipher(iv_bit_size=128, key_bit_size=128, number_of_initialization_clocks=32, len_keystream_word=2)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Return a cipher object of ZUC stream cipher.

    INPUT:

    EXAMPLES:

    @@ -218,94 +219,11 @@


    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test pass w.r.t the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -349,185 +267,24 @@

    Navigation

    clocking_lfsr()
    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factorboolean (default: True); flag indicating if we want the -continuous_neutrality_measure, or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -560,53 +317,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -651,7 +361,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input,such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -665,11 +375,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs and returns integer outputs (as the evaluate function); it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api = True) is True.

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -730,28 +443,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

    • initial_population – integer (default: 32); parameter of the evolutionary algorithm

    • number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

    • nb_samples – integer (default: 10000); number of samples for testing each input difference

    • previous_generation – (default: None); optional: initial table of differences to try

    • verbose – boolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -787,35 +478,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1]. [1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samples – integer; number of samples

    • output_absolute_path – string; absolute path of the output file

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -834,50 +496,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed to a terminal or written to a file.

    -

    INPUT:

    -
      -
    • avalanche_results – dictionary; results of the avalanche tests

    • difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

    • criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
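    A hedged sketch (not part of the original entry) of the "written to a file" use mentioned above; the output path is a hypothetical placeholder:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=3)
    sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(sp.diffusion_tests(number_of_samples=10))
    sage: with open("avalanche_heatmaps.tex", "w") as f:  # hypothetical output path
    ....:     _ = f.write(h)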
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1183,43 +801,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
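    A hedged sketch (not part of the original entry) of a reduced-size invocation; the parameter values are chosen only so the call finishes quickly, and the reported accuracies are not meaningful:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    sage: results = speck(number_of_rounds=3).neural_network_blackbox_distinguisher_tests(
    ....:     nb_samples=10, hidden_layers=[8, 8], number_of_epochs=1)  # doctest: +SKIP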
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samples – integer (default: 10000); how many samples the neural network is trained with

    • hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

    • number_of_epochs – integer (default: 10); number of epochs used to train the neural network

    • diff – list (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1376,24 +957,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the component analysis tests.

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1528,38 +1091,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

    • optimizer_samples – integer; number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

    • optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 40); number of training epochs

    • verbose – boolean (default: False); verbosity of the optimizer

    -

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline()

    -
    -
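    A reduced-parameter sketch (an assumption: smaller values only shorten the run, they do not change the interface); difference_positions restricts the search to plaintext differences for a cipher whose inputs are [plaintext, key]:

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: cipher.run_autond_pipeline(difference_positions=[True, False], optimizer_samples=1000,  # doctest: +SKIP
    ....:     optimizer_generations=5, training_samples=10**5, testing_samples=10**4,
    ....:     number_of_epochs=1, verbose=False)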
    s_box_layer(lo)
    @@ -1646,70 +1177,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • number_of_epochs – integer (default: 200); number of training epochs

    -

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: number_of_rounds = 5
    sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
    2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
    Validation accuracy at 5 rounds :0.9101160168647766
    0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

    • starting_round – integer; number of rounds to analyze

    • neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

    • training_samples – integer (default: 10**7); number of samples used for training

    • testing_samples – integer (default: 10**6); number of samples used for testing

    • pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant.

    • verbose – boolean (default: False); verbosity

    -

    EXAMPLES::

    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
    sage: cipher = SpeckBlockCipher()
    sage: input_differences = [0x400000, 0]
    sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
    sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
    sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1739,8 +1216,8 @@

    Navigation

    Next topic

    @@ -1763,7 +1240,7 @@

    Quick search

    - +
    @@ -1781,7 +1258,7 @@

    Navigation

    next |
  • - previous |
  • @@ -1789,7 +1266,7 @@

    Navigation

    - + diff --git a/docs/build/html/ciphers/toys/toyspn1.html b/docs/build/html/ciphers/toys/toyspn1.html index 575f183d..155801e7 100644 --- a/docs/build/html/ciphers/toys/toyspn1.html +++ b/docs/build/html/ciphers/toys/toyspn1.html @@ -1,22 +1,23 @@ - + - Toyspn1 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Toyspn1 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -33,7 +34,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Toyspn1

    +

    Toyspn1

    class ToySPN1(block_bit_size=6, key_bit_size=6, rotation_layer=1, sbox=[0, 5, 3, 2, 6, 1, 4, 7], number_of_rounds=2)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the ToySPN1 class. This class is used to implement a family of small toy ciphers, the smallest of which has 6-bit block and key size. @@ -271,94 +272,11 @@
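    A minimal construction sketch; the import path below is inferred from this page's location (ciphers/toys/toyspn1) and is an assumption, as are the sample plaintext/key values:

    sage: from claasp.ciphers.toys.toyspn1 import ToySPN1  # doctest: +SKIP
    sage: toyspn1 = ToySPN1(block_bit_size=6, key_bit_size=6, number_of_rounds=2)  # doctest: +SKIP
    sage: toyspn1.evaluate([0x3F, 0x01])  # doctest: +SKIP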

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs["key"]["round_output"][position][index_occurrence] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
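    As an illustrative follow-up, the note above implies that each such entry is a vector of round-output size (assumption: its length equals the round output bit size of the cipher):

    sage: len(apvs["key"]["round_output"][31][0])  # doctest: +SKIP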
    -
    -
    -
    -
    cipher_inverse()
    @@ -397,185 +315,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bits that flip with respect to an input bit difference, for a given round. If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    The avalanche dependence uniform is the number of output bits that flip with a probability in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit difference, for a given round. If the avalanche weights of all the input bit differences for a certain round are close to half of the output bit size with respect to a certain threshold, we say that the cipher satisfies the avalanche criterion for this round.

    The avalanche entropy is defined as the uncertainty about whether output bits flip with respect to an input bit difference, for a given round. If the strict avalanche entropy of all the input bit differences for a certain round is close to the output bit size with respect to a certain threshold, we say that the cipher satisfies the strict avalanche criterion for this round.
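    In compact form (a paraphrase of the prose above, with n the output bit size of the round, t the corresponding threshold from diffusion_tests, and d the measured criterion value):

    avalanche dependence, dependence uniform, entropy:  n - t <= d <= n + t
    avalanche weight:  n/2 - t <= d <= n/2 + t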

    -
    -

    Note

    -

    d["key"]["round_output"][position][index_occurrence]["avalanche_dependence"] = vector of round_output size with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)
    -

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    • input_bitinteger (default: None); input bit position to be analyzed

    • -
    • output_bitslist (default: None); output bit positions to be analyzed

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
    -sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    +
    +convert_to_compound_xor_cipher()
    +
    -
    -continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)
    +
    +create_networx_graph_from_input_ids()
    -
    -convert_to_compound_xor_cipher()
    +
    +create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -608,53 +365,6 @@

    Navigation

    -
    -
    -diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

    -

    INPUT:

    -
      -
    • number_of_samplesinteger (default: 5); used to compute the estimated probability of flipping

    • -
    • avalanche_dependence_uniform_biasfloat (default: 0.05); define the range where the probability -of flipping should be

    • -
    • avalanche_dependence_criterion_thresholdfloat (default: 0); It is a bias. The criterion is satisfied -for a given input bit difference if for all output bits of the round under analysis, the corresponding -avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_dependence_uniform_criterion_thresholdfloat (default: 0); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche dependence uniform criterion d is such that -block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • avalanche_weight_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche weight criterion is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

    • -
    • avalanche_entropy_criterion_thresholdfloat (default: 0.01); It is a bias. The criterion is -satisfied for a given input bit difference if for all output bits of the round under analysis, the -corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

    • -
    • run_avalanche_dependenceboolean (default: True); if True, add the avalanche dependence results -to the output dictionary

    • -
    • run_avalanche_dependence_uniformboolean (default: True); if True, add the avalanche dependence -uniform results to the output dictionary

    • -
    • run_avalanche_weightboolean (default: True); if True, add the avalanche weight results to the -output dictionary

    • -
    • run_avalanche_entropyboolean (default: True); if True, add the avalanche entropy results to the -output dictionary

    • -
    -
    -

    Note

    -

    diff inserted in: -d[“test_results”][“plaintext”][“round_output”][“avalanche_entropy”][“differences”][position][ -“output_vectors”][round]

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: d = speck.diffusion_tests(number_of_samples=100)
    -sage: d["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -699,7 +409,7 @@

    Navigation

    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

    The inputs are given as a list cipher_input, such that cipher_inputs[0] contains the first input, and cipher_inputs[1] the second. @@ -713,11 +423,14 @@

    Navigation

    • cipher_inputlist; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

    • -
    • intermediate_outputsboolean (default: False)

    • +
    • intermediate_outputboolean (default: False)

    • verbosityboolean (default: False); set this flag to True in order to print the input/output of each component

    • +
    • evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

    -

    EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
    @@ -778,28 +491,6 @@ 

    Navigation

    property file_name
    -
    -
    -find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)
    -

    Return good neural distinguisher input differences for a cipher.

    -

    INPUT:

    -
      -
    • difference_positionstable of booleans; one for each input to the cipher. True in positions where -differences are allowed

    • -
    • initial_populationinteger (default: 32); parameter of the evolutionary algorithm

    • -
    • number_of_generationsinteger (default: 50); number of iterations of the evolutionary algorithm

    • -
    • nb_samplesinteger (default: 10); number of samples for testing each input difference

    • -
    • previous_generation – (default: None); optional: initial table of differences to try

    • -
    • verboseboolean (default: False); verbosity

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: cipher = SpeckBlockCipher()
    -sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose = False, number_of_generations=5)
    -
    -
    -
    -
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -835,35 +526,6 @@

    Navigation

    -
    -
    -generate_csv_report(nb_samples, output_absolute_path)
    -

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

    -

    This method generate a CSV report containing the criteria presented in the paper -“The design of Xoodoo and Xoofff” [1]. -[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    -

    INPUT:

    -
      -
    • nb_samplesinteger; number of samples

    • -
    • output_absolute_pathstring; output of the absolute path

    • -
    -

    EXAMPLES:

    -
    sage: import inspect
    -sage: import claasp
    -sage: import os.path
    -sage: tii_path = inspect.getfile(claasp)
    -sage: tii_dir_path = os.path.dirname(tii_path)
    -sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
    -sage: identity = IdentityBlockCipher()
    -sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
    -sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
    -True
    -sage: import os
    -sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    -
    -
    -
    -
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -882,50 +544,6 @@

    Navigation

    -
    -
    -generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)
    -

    Return a string containing latex instructions to generate heatmap graphs of the avalanche tests. -The string can then be printed on a terminal or on a file.

    -

    INPUT:

    -
      -
    • avalanche_resultsdictionary; results of the avalanche tests

    • -
    • -
      difference_positionslist (default: None); positions of the differences to inject.

      The default value is equivalent to pick one of the worst position for a difference and the average value.

      -
      -
      -
    • -
    • -
      criterion_nameslist (default: None); names of the criteria to observe

      The default value is equivalent to to pick all of the 4 criteria: -- “avalanche_dependence_vectors” -- “avalanche_dependence_uniform_vectors” -- “avalanche_entropy_vectors” -- “avalanche_weight_vectors”

      -
      -
      -
    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
    -sage: d = sp.diffusion_tests(number_of_samples=100)
    -sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
    -sage: h[:20]
    -'\documentclass[12pt]'
    -
    -sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
    -sage: ascon = AsconPermutation(number_of_rounds=4)
    -sage: d = ascon.diffusion_tests(number_of_samples=100) # long
    -sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long
    -
    -sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
    -sage: cipher = XoodooPermutation(number_of_rounds=4)
    -sage: d = cipher.diffusion_tests(number_of_samples=100) # long
    -sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
    -
    -
    -
    -
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1191,43 +809,6 @@

    Navigation

    make_file_name()
    -
    -
    -neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    - -
    -
    -neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])
    -

    Return a python dictionary that contains the accuracies corresponding to each round.

    -

    INPUT:

    -
      -
    • nb_samplesinteger (default: 10000); how many sample the neural network is trained with

    • -
    • hidden_layerslist (default: [32, 32, 32]); a list containing the number of neurons in each -hidden layer of the neural network

    • -
    • number_of_epochsinteger (default: 10); how long is the training of the neural network

    • -
    • difflist (default: [0x01]); list of input differences

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    -
    -
    -
    -
    property number_of_rounds
    @@ -1384,24 +965,6 @@

    Navigation

    -
    -
    -print_component_analysis_as_radar_charts(component_analysis_results)
    -

    Return a matplotlib object containing the radar charts of the components analysis test

    -

    INPUT:

    -
      -
    • component_analysis_resultslist; results of the component analysis method

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: fig = aes.print_component_analysis_as_radar_charts(result)
    -sage: fig.show() # doctest: +SKIP
    -
    -
    -
    -
    print_evaluation_python_code(verbosity=False)
    @@ -1536,38 +1099,6 @@

    Navigation

    property rounds_as_list
    -
    -
    -run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    -
    -
    Runs the AutoND pipeline ([BGHR2023]):
      -
    • Find an input difference for the inputs set to True in difference_positions using an optimizer

    • -
    • Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds

    • -
    -

    until the accuracy is no better than random guessing.

    -
    -
    -

    INPUT:

    -
      -
    • difference_positionslist of booleans; default: `True in the plaintext position, False in the

    • -
    -

    other positions`. If specified, must have the same length as self.inputs_bit_size, and contain one boolean per -input position. The optimizer will look for input differences in the positions set to True; by default, -the single-key case will be run. -- optimizer_samplesinteger; number of samples used by the optimizer; higher values increase the -quality of the optimizer, at the cost of a longer runtime. -- optimizer_generationsinteger; (default: 50) number of generations used by the optimizer; -higher values increase the runtime. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs -- verboseboolean; (default: False) verbosity of the optimizer

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: cipher.run_autond_pipeline()

    -
    -
    set_file_name(file_name)
    @@ -1644,70 +1175,16 @@

    Navigation

    -
    -
    -train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)
    -

    Trains a differential neural distinguisher on nr rounds, for the input difference input_difference, using a slightly -modified (AMSGrad instead of cyclic learning rate schedule) depth depth Gohr’s RESNet ([Go2019]).

    -

    INPUT:

    -
      -
    • input_differencelist of integers; The input difference, expressed as a list with one value per -input to the cipher.

    • -
    • number_of_roundsinteger; number of rounds to analyze

    • -
    • depthinteger; (default: 1) the depth of the neural network, as defined in Gohr’s paper

    • -
    • word_sizeinteger; the word size of the cipher, determines the shape of the neural network.

    • -
    -

    Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher). -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- number_of_epochsinteger; (default: 40) number of training epochs

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: number_of_rounds = 5 -sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1) -2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101 -Validation accuracy at 5 rounds :0.9101160168647766 -0.9101160168647766

    -
    - -
    -
    -train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)
    -

    Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_rounds. -If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    -

    INPUT:

    -
      -
    • data_generatorfunction; A dataset generation function, taking as input a cipher (usually self), a number of rounds,

    • -
    -

    and a number of samples, an returns a dataset X, Y, where X is a numpy matrix with one row per sample, and Y is a label veector. -To reproduce classical neural distinguisher results, on would use the example below. -- starting_roundinteger; number of rounds to analyze -- neural_network(compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one -returned by the get_neural_network function of neural_network_tests. -- training_samplesinteger; (default: 10**7) number samples used for training -- testing_samplesinteger; (default: 10**6) number samples used for testing -- pipelineboolean; (default: True) If False, only trains for starting_round. If True, increments starting_round and retrain -the model as long as the accuracy is statistically significant. -- verboseboolean (default: False); verbosity

    -

    EXAMPLES:: -sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher

    -
    -

    sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network -sage: cipher = SpeckBlockCipher() -sage: input_differences = [0x400000, 0] -sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples) -sage: neural_network = get_neural_network(‘gohr_resnet’, input_size = 64) -sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)

    -
    -
    -
    property type
    +
    + +
    +
    @@ -1737,8 +1214,8 @@

    Previous topic

    Next topic

    -

    Rotate component

    +

    Integer

    This Page

    @@ -1756,7 +1233,7 @@

    Quick search

    - +
    @@ -1771,7 +1248,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
  • + diff --git a/docs/build/html/ciphers/toys/toyspn2.html b/docs/build/html/ciphers/toys/toyspn2.html index a47a0615..8c4c84c6 100644 --- a/docs/build/html/ciphers/toys/toyspn2.html +++ b/docs/build/html/ciphers/toys/toyspn2.html @@ -1,23 +1,24 @@ - + - Toyspn2 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Toyspn2 — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - + @@ -36,7 +37,7 @@

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Toyspn2

    +

    Toyspn2

    class ToySPN2(block_bit_size=6, key_bit_size=6, rotation_layer=1, round_key_rotation=1, sbox=[0, 5, 3, 2, 6, 1, 4, 7], number_of_rounds=2)
    -

    Bases: claasp.cipher.Cipher

    +

    Bases: Cipher

    Construct an instance of the ToySPN2 class. This class is used to implement a family of small toy ciphers, the smallest of which has 6-bit block and key size. @@ -264,94 +265,11 @@
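    A minimal construction sketch; the import path below is inferred from this page's location (ciphers/toys/toyspn2) and is an assumption, as are the sample plaintext/key values. Compared to ToySPN1, the constructor also exposes round_key_rotation:

    sage: from claasp.ciphers.toys.toyspn2 import ToySPN2  # doctest: +SKIP
    sage: toyspn2 = ToySPN2(block_bit_size=6, key_bit_size=6, round_key_rotation=1, number_of_rounds=2)  # doctest: +SKIP
    sage: toyspn2.evaluate([0x3F, 0x01])  # doctest: +SKIP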

    Navigation

    add_word_permutation_component(input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -
    -
    -algebraic_tests(timeout)
    -

    Return a dictionary explaining the result of the algebraic test.

    -

    INPUT:

    -
      -
    • timeoutinteger; the timeout for the Grobner basis computation in seconds

    • -
    -

    OUTPUTS: a dictionary with the following keys:

    -
    -
      -
    • npolynomials – number of polynomials

    • -
    • nvariables – number of variables

    • -
    • timeout – timeout in seconds

    • -
    • pass – whether the algebraic test passes w.r.t. the given timeout

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=2)
    -sage: d = speck.algebraic_tests(5)  # long time
    -sage: d == {'input_parameters': {'timeout': 5}, 'test_results':
    -....: {'number_of_variables': [304, 800],
    -....: 'number_of_equations': [240, 688], 'number_of_monomials': [304, 800],
    -....: 'max_degree_of_equations': [1, 1], 'test_passed': [False, False]}}  # long time
    -True
    -
    -
    -
    - -
    -
    -analyze_cipher(tests_configuration)
    -

    Generate a dictionary with the analysis of the cipher.

    -

    The analysis is related to the following tests:

    -
      -
    • Diffusion Tests

    • -
    -

    INPUT:

    -
      -
    • tests_configurationpython dictionary

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    -sage: sp = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: tests_configuration = {"diffusion_tests": {"run_tests": True, "number_of_samples": 100,
    -....: "run_avalanche_dependence": True, "run_avalanche_dependence_uniform": True,
    -....: "run_avalanche_weight": True, "run_avalanche_entropy": True,
    -....: "avalanche_dependence_uniform_bias": 0.2, "avalanche_dependence_criterion_threshold": 0,
    -....: "avalanche_dependence_uniform_criterion_threshold":0, "avalanche_weight_criterion_threshold": 0.1,
    -....: "avalanche_entropy_criterion_threshold":0.1}, "component_analysis_tests": {"run_tests": True}}
    -sage: analysis = sp.analyze_cipher(tests_configuration)
    -sage: analysis["diffusion_tests"]["test_results"]["key"]["round_output"][ # random
    -....: "avalanche_dependence_vectors"]["differences"][31]["output_vectors"][0]["vector"] # random
    -
    -
    -
    -
    as_python_dictionary()
    -
    -
    -avalanche_probability_vectors(nb_samples)
    -

    Return the avalanche probability vectors of each input bit difference for each round.

    -

    The inputs considered are plaintext, key, etc.

    -

    The i-th component of the vector is the probability that i-th bit of the output -flips due to the input bit difference.

    -
    -

    Note

    -

    apvs[“key”][“round_output”][position][index_occurrence] = vector of round_output size with input diff -injected in key

    -
    -

    INPUT:

    -
      -
    • nb_samplesinteger; used to compute the estimated probability of flipping

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: apvs["key"]["round_output"][31][0] # random
    -
    -
    -
    -
    cipher_inverse()
    @@ -390,185 +308,24 @@

    Navigation

    -
    -
    -component_analysis_tests()
    -

    Return a list of dictionaries, each one giving some properties of the cipher’s operations.

    -

    INPUT:

    -
      -
    • None

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    -sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
    -sage: result = aes.component_analysis_tests()
    -sage: len(result)
    -9
    -
    -
    -
    -
    component_from(round_number, index)
    -
    -compute_criterion_from_avalanche_probability_vectors(all_apvs, avalanche_dependence_uniform_bias)
    -

    Return a python dictionary that contains the dictionaries corresponding to each criterion.

    -

    ALGORITHM:

    -

    The avalanche dependence is the number of output bit that flip with respect to an input bit difference, -for a given round. -If the worst avalanche dependence for a certain round is close to the output bit size with respect to a certain -threshold, we say that the cipher satisfies the avalanche dependence criterion for this round.

    -

    The avalanche dependence uniform is the number of output bit that flip with a probability -\in \left[\frac{1}{2} - \text{bias}; \frac{1}{2} + \text{bias}\right], -with respect to an input bit difference, for a given round. If the worst avalanche dependence uniform for a -certain round is close to the output bit size with respect to a certain threshold, -we say that the cipher satisfies the avalanche dependence uniform criterion for this round.

    -

    The avalanche weight is the expected Hamming weight of the output difference with respect to an input bit -difference, for a given round. -If the avalanche weights of all the input bit differences for a certain round is close to half of -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -avalanche criterion for this round.

    -

    The avalanche entropy is defined as uncertainty about whether output bits flip with respect to an input -bit difference, for a given round. -If the strict avalanche entropy of all the input bit differences for a certain round is close to -the output bit size with respect to a certain threshold, we say that the cipher satisfies the -strict avalanche criterion for this round.

    -
    -

    Note

    -

    d[“key”][“round_output”][position][index_occurrence][“avalanche_dependence”] = vector of round_output size -with input diff injected in key

    -
    -

    INPUT:

    -
      -
    • all_apvsdictionary; all avalanche probability vectors returned by avalanche_probability_vectors()

    • -
    • avalanche_dependence_uniform_biasfloat; define the range where the probability of flipping should be

    • -
    -
    -

    See also

    -

    avalanche_probability_vectors() for the returning vectors.

    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck = speck(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
    -sage: apvs = speck.avalanche_probability_vectors(100)
    -sage: d = speck.compute_criterion_from_avalanche_probability_vectors(apvs, 0.2)
    -sage: d["key"]["round_output"][0][0]["avalanche_dependence_vectors"] # random
    -
    -
    -
    - -
    -
    -continuous_avalanche_factor(lambda_value, number_of_samples)
    -

    Continuous generalization of the metric Avalanche Factor. This method implements Definition 14 of [MUR2020].

    -

    INPUT:

    -
      -
    • lambda_valuefloat; threshold value used to express the input difference

    • -
    • number_of_samplesinteger; number of samples used to compute the continuous avalanche factor

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2)
    -sage: result = speck_cipher.continuous_avalanche_factor(0.001, 10)
    -sage: result['plaintext']['round_key_output']['continuous_avalanche_factor']['values'][0]['value']
    -0.0
    -
    -
    -
    - -
    -
    -continuous_diffusion_factor(beta_number_of_samples, gf_number_samples)
    -

    Continuous Diffusion Factor. This method implements Definition 16 of [MUR2020].

    -

    INPUT:

    -
      -
    • beta_number_of_samplesinteger; number of samples used to compute the continuous measure metric

    • -
    • gf_number_samplesinteger; number of vectors used to approximate gf_2

    • -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=2) # long time
    -sage: output = speck_cipher.continuous_diffusion_factor(5, 20) # long time
    -sage: output['plaintext']['cipher_output']['diffusion_factor']['values'][0]['2'] > 0 # long time
    -True
    -
    -
    -
    - -
    -
    -continuous_diffusion_tests(continuous_avalanche_factor_number_of_samples=100, threshold_for_avalanche_factor=0.001, continuous_neutral_measure_beta_number_of_samples=10, continuous_neutral_measure_gf_number_samples=10, continuous_diffusion_factor_beta_number_of_samples=10, continuous_diffusion_factor_gf_number_samples=10, is_continuous_avalanche_factor=True, is_continuous_neutrality_measure=True, is_diffusion_factor=True)
    -

    Return a python dictionary that contains the dictionaries corresponding to each metric in [MUR2020].

    -

    INPUT:

    -
      -
    • continuous_avalanche_factor_number_of_samplesinteger (default: 100); number of samples -used to obtain the metric continuous_avalanche_factor

    • -
    • threshold_for_avalanche_factorfloat (default: 0.001); threshold value used to compute the -input difference for the metric continuous_avalanche_factor

    • -
    • continuous_neutral_measure_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_neutral_measure_gf_number_samplesinteger (default: 10); number of vectors used -to approximate gf_2

    • -
    • continuous_diffusion_factor_beta_number_of_samplesinteger (default: 10); number of samples -used to compute the continuous measure metric

    • -
    • continuous_diffusion_factor_gf_number_samplesinteger (default: 10); number of vectors -used to approximate gf_2

    • -
    • is_continuous_avalanche_factorboolean (default: True); flag indicating if we want the -continuous_avalanche_factor or not

    • -
    • is_continuous_neutrality_measureboolean (default: True); flag indicating if we want the -continuous_neutrality_measure or not

    • -
    • is_diffusion_factor – boolean (default: True); flag indicating if we want the diffusion_factor or not

    • -
    -

    OUTPUT:

    -
    -
      -
    • A python dictionary that contains the test result to each metric. E.g.: continuous_neutrality_measure, -continuous_avalanche_factor, diffusion_factor

    • -
    -
    -

    EXAMPLES:

    -
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
    -sage: speck_cipher = speck(number_of_rounds=1) # long time
    -sage: output = speck_cipher.continuous_diffusion_tests() # long time
    -sage: output['plaintext']['round_key_output']['continuous_neutrality_measure']['values'][0]['1'] == 0.0 # long time
    -True
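A usage sketch (not one of the original doctests): the is_* flags let individual metrics be skipped; the keyword names are the ones listed above, and the speck import from the doctest just shown is reused.

sage: speck_cipher = speck(number_of_rounds=1)  # long time
sage: partial = speck_cipher.continuous_diffusion_tests(  # long time
....:     continuous_avalanche_factor_number_of_samples=50,
....:     is_diffusion_factor=False)  # skip the diffusion factor metric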
continuous_neutrality_measure_for_bit_j(beta_number_of_samples, gf_number_samples, input_bit=None, output_bits=None)

    Continuous Neutrality Measure. This method implements Definition 15 of [MUR2020].

INPUT:

• beta_number_of_samples – integer; number of samples used to compute the continuous measure metric

• gf_number_samples – integer; number of vectors used to approximate gf_2

• input_bit – integer (default: None); input bit position to be analyzed

• output_bits – list (default: None); output bit positions to be analyzed

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: output = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(50, 200) # long time
sage: output['plaintext']['cipher_output']['continuous_neutrality_measure']['values'][0]['2'] > 0 # long time
True
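A further sketch (not from the original documentation): the optional input_bit and output_bits arguments restrict the measure to specific bit positions; the positions below are illustrative only.

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: restricted = speck(number_of_rounds=2).continuous_neutrality_measure_for_bit_j(  # long time
....:     10, 50, input_bit=3, output_bits=[0, 1, 2, 3])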
+convert_to_compound_xor_cipher()

-continuous_neutrality_measure_for_bit_j_and_beta(input_bit, beta, number_of_samples, output_bits)

+create_networx_graph_from_input_ids()

-convert_to_compound_xor_cipher()

+create_top_and_bottom_subgraphs_from_components_graph(e0_bottom_ids, e1_top_ids)
    @@ -601,53 +358,6 @@

diffusion_tests(number_of_samples=5, avalanche_dependence_uniform_bias=0.05, avalanche_dependence_criterion_threshold=0, avalanche_dependence_uniform_criterion_threshold=0, avalanche_weight_criterion_threshold=0.01, avalanche_entropy_criterion_threshold=0.01, run_avalanche_dependence=True, run_avalanche_dependence_uniform=True, run_avalanche_weight=True, run_avalanche_entropy=True)

    Return a python dictionary that contains the dictionaries corresponding to each criterion and their analysis.

INPUT:

• number_of_samples – integer (default: 5); used to compute the estimated probability of flipping

• avalanche_dependence_uniform_bias – float (default: 0.05); defines the range where the probability of flipping should be

• avalanche_dependence_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_dependence_uniform_criterion_threshold – float (default: 0); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche dependence uniform criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• avalanche_weight_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche weight criterion d is such that block_bit_size/2 - bias <= d <= block_bit_size/2 + bias

• avalanche_entropy_criterion_threshold – float (default: 0.01); a bias. The criterion is satisfied for a given input bit difference if, for all output bits of the round under analysis, the corresponding avalanche entropy criterion d is such that block_bit_size - bias <= d <= block_bit_size + bias

• run_avalanche_dependence – boolean (default: True); if True, add the avalanche dependence results to the output dictionary

• run_avalanche_dependence_uniform – boolean (default: True); if True, add the avalanche dependence uniform results to the output dictionary

• run_avalanche_weight – boolean (default: True); if True, add the avalanche weight results to the output dictionary

• run_avalanche_entropy – boolean (default: True); if True, add the avalanche entropy results to the output dictionary

    Note

diff inserted in: d["test_results"]["plaintext"]["round_output"]["avalanche_entropy"]["differences"][position]["output_vectors"][round]

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: speck = SpeckBlockCipher(block_bit_size=16, key_bit_size=32, number_of_rounds=5)
sage: d = speck.diffusion_tests(number_of_samples=100)
sage: d["test_results"]["key"]["round_output"][ # random
....:     "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"] # random
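A follow-up sketch (not part of the original doctest), reusing the dictionary d computed above and the same key path, to inspect one of the returned vectors:

sage: vector = d["test_results"]["key"]["round_output"][
....:     "avalanche_dependence_vectors"]["differences"][0]["output_vectors"][0]["vector"]
sage: vector[:8]   # random; first entries of the avalanche dependence vector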
    evaluate(cipher_input, intermediate_output=False, verbosity=False)
    @@ -692,7 +402,7 @@


    -evaluate_vectorized(cipher_input, intermediate_outputs=False, verbosity=False)
    +evaluate_vectorized(cipher_input, intermediate_output=False, verbosity=False, evaluate_api=False, bit_based=False)

    Return the output of the cipher for multiple inputs.

The inputs are given as a list cipher_input, such that cipher_input[0] contains the first input and cipher_input[1] the second.


• cipher_input – list; block cipher inputs (ndarray of uint8 representing one byte each, n rows, m columns, with m the number of inputs to evaluate)

-• intermediate_outputs – boolean (default: False)
+• intermediate_output – boolean (default: False)

• verbosity – boolean (default: False); set this flag to True in order to print the input/output of each component

+• evaluate_api – boolean (default: False); if set to True, takes integer inputs (as the evaluate function) and returns integer outputs; it is expected that cipher.evaluate(x) == cipher.evaluate_vectorized(x, evaluate_api=True) is True

EXAMPLES:

    sage: import numpy as np
     sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
     sage: speck = speck(block_bit_size=32, key_bit_size=64, number_of_rounds=22)
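A hedged sketch of a full call, continuing the truncated doctest above: each input is packed as columns of uint8 rows, one column per sample, as described in the input list; the input order [plaintext, key] and the random values are assumptions for illustration only.

sage: plaintexts = np.random.randint(256, size=(4, 2), dtype=np.uint8)   # 4 bytes per 32-bit plaintext, 2 samples
sage: keys = np.random.randint(256, size=(8, 2), dtype=np.uint8)         # 8 bytes per 64-bit key, 2 samples
sage: out = speck.evaluate_vectorized([plaintexts, keys])                # random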
    @@ -771,28 +484,6 @@ 


    property file_name
find_good_input_difference_for_neural_distinguisher(difference_positions, initial_population=32, number_of_generations=50, nb_samples=10000, previous_generation=None, verbose=False)

    Return good neural distinguisher input differences for a cipher.

INPUT:

• difference_positions – table of booleans; one for each input to the cipher. True in positions where differences are allowed

• initial_population – integer (default: 32); parameter of the evolutionary algorithm

• number_of_generations – integer (default: 50); number of iterations of the evolutionary algorithm

• nb_samples – integer (default: 10000); number of samples for testing each input difference

• previous_generation – (default: None); optional: initial table of differences to try

• verbose – boolean (default: False); verbosity

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: diff, scores, highest_round = find_good_input_difference_for_neural_distinguisher(cipher, [True, False], verbose=False, number_of_generations=5)
    find_impossible_property(type, technique='sat', solver='kissat', scenario='single-key')
    @@ -828,35 +519,6 @@


generate_csv_report(nb_samples, output_absolute_path)

    Generate a CSV report containing criteria to estimate the vulnerability of the cipher.

This method generates a CSV report containing the criteria presented in the paper "The design of Xoodoo and Xoofff" [1].
[1] https://tosc.iacr.org/index.php/ToSC/article/view/7359

    INPUT:

• nb_samples – integer; number of samples

• output_absolute_path – string; absolute path of the output file

    EXAMPLES:

sage: import inspect
sage: import claasp
sage: import os.path
sage: tii_path = inspect.getfile(claasp)
sage: tii_dir_path = os.path.dirname(tii_path)
sage: from claasp.ciphers.block_ciphers.identity_block_cipher import IdentityBlockCipher
sage: identity = IdentityBlockCipher()
sage: identity.generate_csv_report(10, f"{tii_dir_path}/{identity.id}_report.csv")
sage: os.path.isfile(f"{tii_dir_path}/{identity.id}_report.csv")
True
sage: import os
sage: os.remove(f"{tii_dir_path}/{identity.id}_report.csv")
    generate_evaluate_c_code_shared_library(intermediate_output=False, verbosity=False)
    @@ -875,50 +537,6 @@


generate_heatmap_graphs_for_avalanche_tests(avalanche_results, difference_positions=None, criterion_names=None)

Return a string containing LaTeX instructions to generate heatmap graphs of the avalanche tests. The string can then be printed on a terminal or written to a file.

    INPUT:

• avalanche_results – dictionary; results of the avalanche tests

• difference_positions – list (default: None); positions of the differences to inject. The default value is equivalent to picking one of the worst positions for a difference and the average value.

• criterion_names – list (default: None); names of the criteria to observe. The default value is equivalent to picking all 4 criteria: "avalanche_dependence_vectors", "avalanche_dependence_uniform_vectors", "avalanche_entropy_vectors", "avalanche_weight_vectors"

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: sp = SpeckBlockCipher(block_bit_size=64, key_bit_size=128, number_of_rounds=5)
sage: d = sp.diffusion_tests(number_of_samples=100)
sage: h = sp.generate_heatmap_graphs_for_avalanche_tests(d)
sage: h[:20]
'\documentclass[12pt]'

sage: from claasp.ciphers.permutations.ascon_permutation import AsconPermutation
sage: ascon = AsconPermutation(number_of_rounds=4)
sage: d = ascon.diffusion_tests(number_of_samples=100) # long
sage: h = ascon.generate_heatmap_graphs_for_avalanche_tests(d, [0], ["avalanche_weight_vectors"]) # long

sage: from claasp.ciphers.permutations.xoodoo_permutation import XoodooPermutation
sage: cipher = XoodooPermutation(number_of_rounds=4)
sage: d = cipher.diffusion_tests(number_of_samples=100) # long
sage: h = cipher.generate_heatmap_graphs_for_avalanche_tests(d, [1,193], ["avalanche_dependence_vectors", "avalanche_entropy_vectors"]) # long
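Since the returned string is a LaTeX document, a natural follow-up (a sketch, not an original doctest; the file name is arbitrary) is to write it out for compilation, reusing the h computed above:

sage: with open("avalanche_heatmaps.tex", "w") as f:   # hypothetical output file
....:     _ = f.write(h)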
    generate_word_based_c_code(word_size, intermediate_output=False, verbosity=False)
    @@ -1184,43 +802,6 @@


    make_file_name()
neural_network_blackbox_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10)

    Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); how long the training of the neural network lasts

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_blackbox_distinguisher_tests(nb_samples = 10) # random

neural_network_differential_distinguisher_tests(nb_samples=10000, hidden_layers=[32, 32, 32], number_of_epochs=10, diff=[1])

    Return a python dictionary that contains the accuracies corresponding to each round.

INPUT:

• nb_samples – integer (default: 10000); how many samples the neural network is trained with

• hidden_layers – list (default: [32, 32, 32]); a list containing the number of neurons in each hidden layer of the neural network

• number_of_epochs – integer (default: 10); how long the training of the neural network lasts

• diff – list (default: [0x01]); list of input differences

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher as speck
sage: #speck(number_of_rounds=22).neural_network_differential_distinguisher_tests(nb_samples = 10) # random
    property number_of_rounds
    @@ -1377,24 +958,6 @@


print_component_analysis_as_radar_charts(component_analysis_results)

Return a matplotlib object containing the radar charts of the component analysis test.

INPUT:

• component_analysis_results – list; results of the component analysis method

    EXAMPLES:

sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
sage: aes = AESBlockCipher(word_size=8, state_size=4, number_of_rounds=2)
sage: result = aes.component_analysis_tests()
sage: fig = aes.print_component_analysis_as_radar_charts(result)
sage: fig.show() # doctest: +SKIP
    print_evaluation_python_code(verbosity=False)
    @@ -1529,38 +1092,6 @@


    property rounds_as_list
run_autond_pipeline(difference_positions=None, optimizer_samples=10000, optimizer_generations=50, training_samples=10000000, testing_samples=1000000, number_of_epochs=40, verbose=False)
    Runs the AutoND pipeline ([BGHR2023]):
• Find an input difference for the inputs set to True in difference_positions using an optimizer

• Train a neural distinguisher based on DBitNET for that input difference, increasing the number of rounds until the accuracy is no better than random guessing

    INPUT:

• difference_positions – list of booleans (default: True in the plaintext position, False in the other positions); if specified, must have the same length as self.inputs_bit_size and contain one boolean per input position. The optimizer will look for input differences in the positions set to True; by default, the single-key case will be run.

• optimizer_samples – integer (default: 10000); number of samples used by the optimizer; higher values increase the quality of the optimizer, at the cost of a longer runtime

• optimizer_generations – integer (default: 50); number of generations used by the optimizer; higher values increase the runtime

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 40); number of training epochs

• verbose – boolean (default: False); verbosity of the optimizer

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: cipher.run_autond_pipeline()
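A hedged variant of the call above (illustrative parameter values only; not an original doctest), restricting the optimizer to plaintext differences and shrinking the sample sizes for a quicker run on the same cipher object:

sage: cipher.run_autond_pipeline(difference_positions=[True, False],   # plaintext only, key excluded
....:     optimizer_samples=1000, optimizer_generations=5,
....:     training_samples=10**5, testing_samples=10**4,
....:     number_of_epochs=1, verbose=False)   # long time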
    set_file_name(file_name)
    @@ -1637,70 +1168,16 @@


train_gohr_neural_distinguisher(input_difference, number_of_rounds, depth=1, word_size=0, training_samples=10000000, testing_samples=1000000, number_of_epochs=200)

Trains a differential neural distinguisher on number_of_rounds rounds, for the input difference input_difference, using a slightly modified (AMSGrad instead of a cyclic learning rate schedule) Gohr's ResNet of depth depth ([Go2019]).

    INPUT:

• input_difference – list of integers; the input difference, expressed as a list with one value per input to the cipher

• number_of_rounds – integer; number of rounds to analyze

• depth – integer (default: 1); the depth of the neural network, as defined in Gohr's paper

• word_size – integer; the word size of the cipher, determines the shape of the neural network. Defaults to output_bit_size when unspecified (may reduce the accuracy of the obtained distinguisher)

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• number_of_epochs – integer (default: 200); number of training epochs

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: number_of_rounds = 5
sage: cipher.train_gohr_neural_distinguisher(input_differences, number_of_rounds, word_size = 16, number_of_epochs = 1)
2000/2000 [==============================] - 294s 146ms/step - loss: 0.0890 - acc: 0.8876 - val_loss: 0.0734 - val_acc: 0.9101
Validation accuracy at 5 rounds :0.9101160168647766
0.9101160168647766

train_neural_distinguisher(data_generator, starting_round, neural_network, training_samples=10000000, testing_samples=1000000, epochs=5, pipeline=True)

Trains a neural distinguisher for the data generated by the data_generator function, using the provided neural network, at round starting_round. If pipeline is set to True, retrains the distinguisher for one more round, as long as the validation accuracy remains significant.

    INPUT:

• data_generator – function; a dataset generation function, taking as input a cipher (usually self), a number of rounds, and a number of samples, and returning a dataset X, Y, where X is a numpy matrix with one row per sample and Y is a label vector. To reproduce classical neural distinguisher results, one would use the example below.

• starting_round – integer; number of rounds to analyze

• neural_network – (compiled) keras model (default: None); the neural network to use for distinguishing, either a custom one or one returned by the get_neural_network function of neural_network_tests

• training_samples – integer (default: 10**7); number of samples used for training

• testing_samples – integer (default: 10**6); number of samples used for testing

• pipeline – boolean (default: True); if False, only trains for starting_round. If True, increments starting_round and retrains the model as long as the accuracy is statistically significant

• verbose – boolean (default: False); verbosity

EXAMPLES:

sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
sage: from claasp.cipher_modules.neural_network_tests import get_differential_dataset, get_neural_network
sage: cipher = SpeckBlockCipher()
sage: input_differences = [0x400000, 0]
sage: data_generator = lambda nr, samples: get_differential_dataset(cipher, input_differences, number_of_rounds = nr, samples = samples)
sage: neural_network = get_neural_network('gohr_resnet', input_size = 64)
sage: cipher.train_neural_distinguisher(data_generator, starting_round = 5, neural_network = neural_network)
    property type

diff --git a/docs/build/html/component.html b/docs/build/html/component.html
index cb1e7a7e..b73dd6da 100644
--- a/docs/build/html/component.html
+++ b/docs/build/html/component.html
@@ -1,23 +1,24 @@
-Component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Component

    +

    Component

    class Component(component_id, component_type, component_input, output_bit_size, description)
    @@ -214,13 +215,13 @@

    Navigation

    Previous topic

    -

    Cipher

    +

    Rounds

    Next topic

    -

    Rounds

    +

    Round

    This Page

    @@ -238,7 +239,7 @@

    Quick search

    - +
    @@ -253,10 +254,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -264,7 +265,7 @@

    Navigation

diff --git a/docs/build/html/components/and_component.html b/docs/build/html/components/and_component.html
index ddaa1d66..9b068c8d 100644
--- a/docs/build/html/components/and_component.html
+++ b/docs/build/html/components/and_component.html
@@ -1,23 +1,24 @@
-And component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+And component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    And component

    +

    And component

    class AND(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.components.multi_input_non_linear_logical_operator_component.MultiInputNonlinearLogicalOperator

    +

    Bases: MultiInputNonlinearLogicalOperator

    algebraic_polynomials(model)
    @@ -430,7 +431,7 @@

    Navigation

    [0 <= -1*x_32 + x_48, 0 <= -1*x_33 + x_49, ... -x_64 == 10*x_48 + 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_48 + 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63]
    @@ -471,7 +472,7 @@

    Navigation

    ... 0 <= -1*x_15 + x_47, x_48 == x_32 + x_33 + x_34 + x_35 + x_36 + x_37 + x_38 + x_39 + x_40 + x_41 + x_42 + x_43 + x_44 + x_45 + x_46 + x_47, -x_49 == 10*x_48] +x_49 == 100*x_48]
    @@ -506,6 +507,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for AND/OR in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: and_component = fancy.component_from(0, 8)
    +sage: and_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['and_0_8_0_0',
    +  'and_0_8_1_0',
    +  'and_0_8_2_0',
    +  ...
    +  'and_0_8_11_0 -key_23_1',
    +  'and_0_8_11_0 -and_0_8_11_1',
    +  'xor_0_7_11_0 key_23_0 xor_0_7_11_1 key_23_1 -and_0_8_11_0'])
    +
    +
    +
    +
    sat_constraints()
    @@ -763,8 +793,8 @@

    Navigation

    Previous topic

    -

    Modular component

    +

    Sbox component

    Next topic

    @@ -787,7 +817,7 @@

    Quick search

    - +
    @@ -805,7 +835,7 @@

    Navigation

    next |
  • - previous |
  • @@ -813,7 +843,7 @@

    Navigation

diff --git a/docs/build/html/components/cipher_output_component.html b/docs/build/html/components/cipher_output_component.html
index 8a57e86e..2d764a6e 100644
--- a/docs/build/html/components/cipher_output_component.html
+++ b/docs/build/html/components/cipher_output_component.html
@@ -1,23 +1,24 @@
-Cipher output component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Cipher output component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Cipher output component

    +

    Cipher output component

    class CipherOutput(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, is_intermediate=False, output_tag='')
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -441,6 +442,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for OUTPUT in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: output_component = speck.component_from(2, 12)
    +sage: output_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['cipher_output_2_12_0_0',
    +  'cipher_output_2_12_1_0',
    +  'cipher_output_2_12_2_0',
    +  ...
    +  'xor_2_10_14_1 -cipher_output_2_12_30_1',
    +  'cipher_output_2_12_31_1 -xor_2_10_15_1',
    +  'xor_2_10_15_1 -cipher_output_2_12_31_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -469,11 +499,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model)
    @@ -619,13 +644,13 @@

    Navigation

    This Page

    @@ -643,7 +668,7 @@

    Quick search

    - +
    @@ -658,10 +683,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -669,7 +694,7 @@

    Navigation

diff --git a/docs/build/html/components/concatenate_component.html b/docs/build/html/components/concatenate_component.html
index 5b498068..26323453 100644
--- a/docs/build/html/components/concatenate_component.html
+++ b/docs/build/html/components/concatenate_component.html
@@ -1,22 +1,23 @@
-Concatenate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Concatenate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Concatenate component

    +

    Concatenate component

    class Concatenate(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -224,8 +225,8 @@

    Previous topic

    This Page

    @@ -243,7 +244,7 @@

    Quick search

    - +
    @@ -258,7 +259,7 @@

    Navigation

    modules |
  • - next |
  • Navigation -
diff --git a/docs/build/html/components/constant_component.html b/docs/build/html/components/constant_component.html
index 3c1356a7..eb118f12 100644
--- a/docs/build/html/components/constant_component.html
+++ b/docs/build/html/components/constant_component.html
@@ -1,23 +1,24 @@
-Constant component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Constant component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Constant component

    +

    Constant component

    class Constant(current_round_number, current_round_number_of_components, output_bit_size, value)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -490,6 +491,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for CONSTANT in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: constant_component = speck.component_from(2, 0)
    +sage: constant_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['constant_2_0_0_0',
    +  'constant_2_0_1_0',
    +  'constant_2_0_2_0',
    +  ...
    +  '-constant_2_0_13_1',
    +  '-constant_2_0_14_1',
    +  '-constant_2_0_15_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -518,11 +548,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model=None)
    @@ -714,13 +739,13 @@

    Navigation

    This Page

    @@ -738,7 +763,7 @@

    Quick search

    - +
    @@ -753,10 +778,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -764,7 +789,7 @@

    Navigation

diff --git a/docs/build/html/components/fsr_component.html b/docs/build/html/components/fsr_component.html
index cb26e78d..3b3fd244 100644
--- a/docs/build/html/components/fsr_component.html
+++ b/docs/build/html/components/fsr_component.html
@@ -1,23 +1,24 @@
-Fsr component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Fsr component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Fsr component

    +

    Fsr component

    class FSR(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, description)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -199,13 +200,13 @@

    Navigation

    This Page

    @@ -223,7 +224,7 @@

    Quick search

    - +
    @@ -238,10 +239,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -249,7 +250,7 @@

    Navigation

diff --git a/docs/build/html/components/intermediate_output_component.html b/docs/build/html/components/intermediate_output_component.html
index 73f3b2d9..a518b2cd 100644
--- a/docs/build/html/components/intermediate_output_component.html
+++ b/docs/build/html/components/intermediate_output_component.html
@@ -1,23 +1,24 @@
-Intermediate output component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Intermediate output component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Intermediate output component

    +

    Intermediate output component

    class IntermediateOutput(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, output_tag)
    -

    Bases: claasp.components.cipher_output_component.CipherOutput

    +

    Bases: CipherOutput

    as_python_dictionary()
    @@ -445,6 +446,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for OUTPUT in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: output_component = speck.component_from(2, 12)
    +sage: output_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['cipher_output_2_12_0_0',
    +  'cipher_output_2_12_1_0',
    +  'cipher_output_2_12_2_0',
    +  ...
    +  'xor_2_10_14_1 -cipher_output_2_12_30_1',
    +  'cipher_output_2_12_31_1 -xor_2_10_15_1',
    +  'xor_2_10_15_1 -cipher_output_2_12_31_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -473,11 +503,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model)
    @@ -629,13 +654,13 @@

    Navigation

    This Page

    @@ -653,7 +678,7 @@

    Quick search

    - +
    @@ -668,10 +693,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -679,7 +704,7 @@

    Navigation

diff --git a/docs/build/html/components/linear_layer_component.html b/docs/build/html/components/linear_layer_component.html
index 22a2d4ac..8ddbb70f 100644
--- a/docs/build/html/components/linear_layer_component.html
+++ b/docs/build/html/components/linear_layer_component.html
@@ -1,23 +1,24 @@
-Linear layer component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Linear layer component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Linear layer component

    +

    Linear layer component

    class LinearLayer(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, description)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for LINEAR LAYER in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
    sat_constraints()
    @@ -652,13 +677,13 @@

    Navigation

    This Page

    @@ -676,7 +701,7 @@

    Quick search

    - +
    @@ -691,10 +716,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -702,7 +727,7 @@

    Navigation

diff --git a/docs/build/html/components/mix_column_component.html b/docs/build/html/components/mix_column_component.html
index c4e3f9ed..5378eb40 100644
--- a/docs/build/html/components/mix_column_component.html
+++ b/docs/build/html/components/mix_column_component.html
@@ -1,23 +1,24 @@
-Mix column component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Mix column component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Mix column component

    +

    Mix column component

    class MixColumn(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, description)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -509,6 +510,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for MIX COLUMN in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=3)
    +sage: mix_column_component = midori.component_from(0, 23)
    +sage: out_ids, constraints = mix_column_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[7]
    +'mix_column_0_23_0_0 -inter_0_mix_column_0_23_0_0'
    +
    +
    +
    +
    sat_constraints()
    @@ -697,13 +722,13 @@

    Navigation

    Previous topic

    -

    Constant component

    +

    Not component

    This Page

    @@ -721,7 +746,7 @@

    Quick search

    - +
    @@ -736,10 +761,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -747,7 +772,7 @@

    Navigation

diff --git a/docs/build/html/components/modadd_component.html b/docs/build/html/components/modadd_component.html
index 0e461419..664d77ea 100644
--- a/docs/build/html/components/modadd_component.html
+++ b/docs/build/html/components/modadd_component.html
@@ -1,23 +1,24 @@
-Modadd component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Modadd component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Modadd component

    +

    Modadd component

    class MODADD(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, modulus)
    -

    Bases: claasp.components.modular_component.Modular

    +

    Bases: Modular

    algebraic_polynomials(model)
    @@ -272,6 +273,11 @@

    Navigation

    +
    +
    +create_bct_mzn_constraint_from_component_ids()
    +
    +
    property description
    @@ -426,7 +432,7 @@

    Navigation

    x_15 <= x_48, ... -2 <= -1*x_0 - x_16 - x_17 + x_32 + x_63, -x_64 == 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63]
    @@ -460,7 +466,7 @@

    Navigation

    ... -4 <= x_15 + x_31 + x_47 + x_63 + x_64, x_65 == x_48 + x_49 + x_50 + x_51 + x_52 + x_53 + x_54 + x_55 + x_56 + x_57 + x_58 + x_59 + x_60 + x_61 + x_62 + x_63, - x_66 == 10*x_65] + x_66 == 100*x_65]
    @@ -516,6 +522,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for Modular Addition in DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: modadd_component = speck.component_from(0, 1)
    +sage: modadd_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['modadd_0_1_0_0',
    +  'modadd_0_1_1_0',
    +  'modadd_0_1_2_0',
    +  ...
    +  'rot_0_0_15_0 plaintext_31_0 -rot_0_0_15_1 -modadd_0_1_15_0',
    +  'rot_0_0_15_0 plaintext_31_0 -plaintext_31_1 -modadd_0_1_15_0',
    +  'modadd_0_1_15_0 -rot_0_0_15_1 -plaintext_31_1 -modadd_0_1_15_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -828,13 +863,13 @@

    Navigation

    Previous topic

    -

    Or component

    +

    Constant component

    This Page

    @@ -852,7 +887,7 @@

    Quick search

    - +
    @@ -867,10 +902,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -878,7 +913,7 @@

    Navigation

diff --git a/docs/build/html/components/modsub_component.html b/docs/build/html/components/modsub_component.html
index 22b76636..f5d51237 100644
--- a/docs/build/html/components/modsub_component.html
+++ b/docs/build/html/components/modsub_component.html
@@ -1,23 +1,24 @@
-Modsub component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Modsub component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,40 @@

    Navigation

    -

    Modsub component

    +

    Modsub component

    class MODSUB(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, modulus)
    -

    Bases: claasp.components.modular_component.Modular

    +

    Bases: Modular

    +
    +
    +algebraic_polynomials(model)
    +

Return a list of polynomials representing the modular subtraction operation.

    +

    INPUT:

    +
      +
    • modelmodel object; a model instance

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.cipher_modules.models.algebraic.algebraic_model import AlgebraicModel
    +sage: from claasp.cipher import Cipher
    +sage: cipher = Cipher("cipher_name", "permutation", ["input"], [8], 8)
    +sage: cipher.add_round()
    +sage: modsub_0_0 = cipher.add_MODSUB_component(["input","input"], [[0,1,2,3],[4,5,6,7]], 4)
    +sage: modsub_component = cipher.get_component_from_id('modsub_0_0')
    +sage: algebraic = AlgebraicModel(cipher)
    +sage: modsub_component.algebraic_polynomials(algebraic)
    +[modsub_0_0_b0_0,
    + modsub_0_0_b0_0 + modsub_0_0_y0 + modsub_0_0_x4 + modsub_0_0_x0,
    + modsub_0_0_x4*modsub_0_0_b0_0 + modsub_0_0_x0*modsub_0_0_b0_0 + modsub_0_0_x0*modsub_0_0_x4 + modsub_0_0_b0_1 + modsub_0_0_b0_0 + modsub_0_0_x4,
    + modsub_0_0_b0_1 + modsub_0_0_y1 + modsub_0_0_x5 + modsub_0_0_x1,
    + modsub_0_0_x5*modsub_0_0_b0_1 + modsub_0_0_x1*modsub_0_0_b0_1 + modsub_0_0_x1*modsub_0_0_x5 + modsub_0_0_b0_2 + modsub_0_0_b0_1 + modsub_0_0_x5,
    + modsub_0_0_b0_2 + modsub_0_0_y2 + modsub_0_0_x6 + modsub_0_0_x2,
    + modsub_0_0_x6*modsub_0_0_b0_2 + modsub_0_0_x2*modsub_0_0_b0_2 + modsub_0_0_x2*modsub_0_0_x6 + modsub_0_0_b0_3 + modsub_0_0_b0_2 + modsub_0_0_x6,
    + modsub_0_0_b0_3 + modsub_0_0_y3 + modsub_0_0_x7 + modsub_0_0_x3]
    +
    +
    +
    +
    as_python_dictionary()
    @@ -251,6 +281,11 @@

    Navigation

    +
    +
    +create_bct_mzn_constraint_from_component_ids()
    +
    +
    property description
    @@ -405,7 +440,7 @@

    Navigation

    x_15 <= x_48, ... -2 <= -1*x_0 - x_16 - x_17 + x_32 + x_63, -x_64 == 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63]
    @@ -439,7 +474,7 @@

    Navigation

    ... -4 <= x_15 + x_31 + x_47 + x_63 + x_64, x_65 == x_48 + x_49 + x_50 + x_51 + x_52 + x_53 + x_54 + x_55 + x_56 + x_57 + x_58 + x_59 + x_60 + x_61 + x_62 + x_63, - x_66 == 10*x_65] + x_66 == 100*x_65]
    @@ -495,6 +530,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for Modular Addition in DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: modadd_component = speck.component_from(0, 1)
    +sage: modadd_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['modadd_0_1_0_0',
    +  'modadd_0_1_1_0',
    +  'modadd_0_1_2_0',
    +  ...
    +  'rot_0_0_15_0 plaintext_31_0 -rot_0_0_15_1 -modadd_0_1_15_0',
    +  'rot_0_0_15_0 plaintext_31_0 -plaintext_31_1 -modadd_0_1_15_0',
    +  'modadd_0_1_15_0 -rot_0_0_15_1 -plaintext_31_1 -modadd_0_1_15_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -767,13 +831,13 @@

    Navigation

    This Page

    @@ -791,7 +855,7 @@

    Quick search

    - +
    @@ -806,10 +870,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -817,7 +881,7 @@

    Navigation

diff --git a/docs/build/html/components/modular_component.html b/docs/build/html/components/modular_component.html
index 5c1f0393..05b76222 100644
--- a/docs/build/html/components/modular_component.html
+++ b/docs/build/html/components/modular_component.html
@@ -1,23 +1,24 @@
-Modular component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Modular component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Modular component

    +

    Modular component

    class Modular(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, operation, modulus)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -192,6 +193,11 @@

    Navigation

    +
    +
    +create_bct_mzn_constraint_from_component_ids()
    +
    +
    property description
    @@ -336,7 +342,7 @@

    Navigation

    x_15 <= x_48, ... -2 <= -1*x_0 - x_16 - x_17 + x_32 + x_63, -x_64 == 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63]
    @@ -370,7 +376,7 @@

    Navigation

    ... -4 <= x_15 + x_31 + x_47 + x_63 + x_64, x_65 == x_48 + x_49 + x_50 + x_51 + x_52 + x_53 + x_54 + x_55 + x_56 + x_57 + x_58 + x_59 + x_60 + x_61 + x_62 + x_63, - x_66 == 10*x_65] + x_66 == 100*x_65] @@ -426,6 +432,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for Modular Addition in DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: modadd_component = speck.component_from(0, 1)
    +sage: modadd_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['modadd_0_1_0_0',
    +  'modadd_0_1_1_0',
    +  'modadd_0_1_2_0',
    +  ...
    +  'rot_0_0_15_0 plaintext_31_0 -rot_0_0_15_1 -modadd_0_1_15_0',
    +  'rot_0_0_15_0 plaintext_31_0 -plaintext_31_1 -modadd_0_1_15_0',
    +  'modadd_0_1_15_0 -rot_0_0_15_1 -plaintext_31_1 -modadd_0_1_15_1'])
    +
    +
    +
    +
    sat_xor_differential_propagation_constraints(model)
    @@ -619,6 +654,11 @@

    Navigation

    +
    +
    +generate_constraints_for_window_size_with_full_windows(first_addend, second_addend, result, aux_var)
    +
    +
    generic_sign_linear_constraints(inputs, outputs)
    @@ -659,13 +699,13 @@

    Navigation

    Previous topic

    -

    Fsr component

    +

    Modsub component

    This Page

    @@ -683,7 +723,7 @@

    Quick search

    - +
    @@ -698,10 +738,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -709,7 +749,7 @@

    Navigation

diff --git a/docs/build/html/components/multi_input_non_linear_logical_operator_component.html b/docs/build/html/components/multi_input_non_linear_logical_operator_component.html
index 86b4996d..8886ad1e 100644
--- a/docs/build/html/components/multi_input_non_linear_logical_operator_component.html
+++ b/docs/build/html/components/multi_input_non_linear_logical_operator_component.html
@@ -1,23 +1,24 @@
-Multi input non linear logical operator component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Multi input non linear logical operator component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Multi input non linear logical operator component

    +

    Multi input non linear logical operator component

    class MultiInputNonlinearLogicalOperator(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, operation)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -270,7 +271,7 @@

    Navigation

    [0 <= -1*x_32 + x_48, 0 <= -1*x_33 + x_49, ... -x_64 == 10*x_48 + 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_48 + 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63]
    @@ -311,7 +312,7 @@

    Navigation

    ... 0 <= -1*x_15 + x_47, x_48 == x_32 + x_33 + x_34 + x_35 + x_36 + x_37 + x_38 + x_39 + x_40 + x_41 + x_42 + x_43 + x_44 + x_45 + x_46 + x_47, -x_49 == 10*x_48] +x_49 == 100*x_48] @@ -346,6 +347,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for AND/OR in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: and_component = fancy.component_from(0, 8)
    +sage: and_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['and_0_8_0_0',
    +  'and_0_8_1_0',
    +  'and_0_8_2_0',
    +  ...
    +  'and_0_8_11_0 -key_23_1',
    +  'and_0_8_11_0 -and_0_8_11_1',
    +  'xor_0_7_11_0 key_23_0 xor_0_7_11_1 key_23_1 -and_0_8_11_0'])
    +
    +
    +
    +
    sat_constraints()
    @@ -538,13 +568,13 @@

    Navigation

    Previous topic

    -

    Modadd component

    +

    Shift component

    This Page

    @@ -562,7 +592,7 @@

    Quick search

    - +
    @@ -577,10 +607,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -588,7 +618,7 @@

    Navigation

diff --git a/docs/build/html/components/not_component.html b/docs/build/html/components/not_component.html
index 62117daf..11f3e378 100644
--- a/docs/build/html/components/not_component.html
+++ b/docs/build/html/components/not_component.html
@@ -1,23 +1,24 @@
-Not component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Not component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Not component

    +

    Not component

    class NOT(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -494,6 +495,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

Return a list of variables and a list of clauses for NOT in SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.permutations.gift_permutation import GiftPermutation
    +sage: gift = GiftPermutation(number_of_rounds=3)
    +sage: not_component = gift.component_from(0, 8)
    +sage: not_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['not_0_8_0_0',
    +  'not_0_8_1_0',
    +  'not_0_8_2_0',
    +  ...
    +  'xor_0_6_30_0 -xor_0_6_30_1 -not_0_8_30_1',
    +  'xor_0_6_31_0 xor_0_6_31_1 not_0_8_31_1',
    +  'xor_0_6_31_0 -xor_0_6_31_1 -not_0_8_31_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -712,13 +742,13 @@

    Navigation

    This Page

    @@ -736,7 +766,7 @@

    Quick search

    - +
    @@ -751,10 +781,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -762,7 +792,7 @@

    Navigation

diff --git a/docs/build/html/components/or_component.html b/docs/build/html/components/or_component.html
index 385005c4..8579cc0f 100644
--- a/docs/build/html/components/or_component.html
+++ b/docs/build/html/components/or_component.html
@@ -1,23 +1,24 @@
-Or component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+Or component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Or component

    +

    Or component

    class OR(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.components.multi_input_non_linear_logical_operator_component.MultiInputNonlinearLogicalOperator

    +

    Bases: MultiInputNonlinearLogicalOperator

    algebraic_polynomials(model)
    @@ -76,11 +77,38 @@

    Navigation

    sage: or_component = gift.get_component_from_id("or_0_4") sage: algebraic = AlgebraicModel(gift) sage: or_component.algebraic_polynomials(algebraic) -[or_0_4_y0 + 1, - or_0_4_y1 + 1, - ... - or_0_4_y30 + 1, - or_0_4_y31 + 1] +[or_0_4_x0*or_0_4_x32 + or_0_4_y0 + or_0_4_x32 + or_0_4_x0, + or_0_4_x1*or_0_4_x33 + or_0_4_y1 + or_0_4_x33 + or_0_4_x1, + or_0_4_x2*or_0_4_x34 + or_0_4_y2 + or_0_4_x34 + or_0_4_x2, + or_0_4_x3*or_0_4_x35 + or_0_4_y3 + or_0_4_x35 + or_0_4_x3, + or_0_4_x4*or_0_4_x36 + or_0_4_y4 + or_0_4_x36 + or_0_4_x4, + or_0_4_x5*or_0_4_x37 + or_0_4_y5 + or_0_4_x37 + or_0_4_x5, + or_0_4_x6*or_0_4_x38 + or_0_4_y6 + or_0_4_x38 + or_0_4_x6, + or_0_4_x7*or_0_4_x39 + or_0_4_y7 + or_0_4_x39 + or_0_4_x7, + or_0_4_x8*or_0_4_x40 + or_0_4_y8 + or_0_4_x40 + or_0_4_x8, + or_0_4_x9*or_0_4_x41 + or_0_4_y9 + or_0_4_x41 + or_0_4_x9, + or_0_4_x10*or_0_4_x42 + or_0_4_y10 + or_0_4_x42 + or_0_4_x10, + or_0_4_x11*or_0_4_x43 + or_0_4_y11 + or_0_4_x43 + or_0_4_x11, + or_0_4_x12*or_0_4_x44 + or_0_4_y12 + or_0_4_x44 + or_0_4_x12, + or_0_4_x13*or_0_4_x45 + or_0_4_y13 + or_0_4_x45 + or_0_4_x13, + or_0_4_x14*or_0_4_x46 + or_0_4_y14 + or_0_4_x46 + or_0_4_x14, + or_0_4_x15*or_0_4_x47 + or_0_4_y15 + or_0_4_x47 + or_0_4_x15, + or_0_4_x16*or_0_4_x48 + or_0_4_y16 + or_0_4_x48 + or_0_4_x16, + or_0_4_x17*or_0_4_x49 + or_0_4_y17 + or_0_4_x49 + or_0_4_x17, + or_0_4_x18*or_0_4_x50 + or_0_4_y18 + or_0_4_x50 + or_0_4_x18, + or_0_4_x19*or_0_4_x51 + or_0_4_y19 + or_0_4_x51 + or_0_4_x19, + or_0_4_x20*or_0_4_x52 + or_0_4_y20 + or_0_4_x52 + or_0_4_x20, + or_0_4_x21*or_0_4_x53 + or_0_4_y21 + or_0_4_x53 + or_0_4_x21, + or_0_4_x22*or_0_4_x54 + or_0_4_y22 + or_0_4_x54 + or_0_4_x22, + or_0_4_x23*or_0_4_x55 + or_0_4_y23 + or_0_4_x55 + or_0_4_x23, + or_0_4_x24*or_0_4_x56 + or_0_4_y24 + or_0_4_x56 + or_0_4_x24, + or_0_4_x25*or_0_4_x57 + or_0_4_y25 + or_0_4_x57 + or_0_4_x25, + or_0_4_x26*or_0_4_x58 + or_0_4_y26 + or_0_4_x58 + or_0_4_x26, + or_0_4_x27*or_0_4_x59 + or_0_4_y27 + or_0_4_x59 + or_0_4_x27, + or_0_4_x28*or_0_4_x60 + or_0_4_y28 + or_0_4_x60 + or_0_4_x28, + or_0_4_x29*or_0_4_x61 + or_0_4_y29 + or_0_4_x61 + or_0_4_x29, + or_0_4_x30*or_0_4_x62 + or_0_4_y30 + or_0_4_x62 + or_0_4_x30, + or_0_4_x31*or_0_4_x63 + or_0_4_y31 + or_0_4_x63 + or_0_4_x31]
    @@ -367,7 +395,7 @@

    Navigation

    [0 <= -1*x_32 + x_48, 0 <= -1*x_33 + x_49, ... -x_64 == 10*x_48 + 10*x_49 + 10*x_50 + 10*x_51 + 10*x_52 + 10*x_53 + 10*x_54 + 10*x_55 + 10*x_56 + 10*x_57 + 10*x_58 + 10*x_59 + 10*x_60 + 10*x_61 + 10*x_62 + 10*x_63] +x_64 == 100*x_48 + 100*x_49 + 100*x_50 + 100*x_51 + 100*x_52 + 100*x_53 + 100*x_54 + 100*x_55 + 100*x_56 + 100*x_57 + 100*x_58 + 100*x_59 + 100*x_60 + 100*x_61 + 100*x_62 + 100*x_63] @@ -408,7 +436,7 @@

    Navigation

    ... 0 <= -1*x_15 + x_47, x_48 == x_32 + x_33 + x_34 + x_35 + x_36 + x_37 + x_38 + x_39 + x_40 + x_41 + x_42 + x_43 + x_44 + x_45 + x_46 + x_47, -x_49 == 10*x_48] +x_49 == 100*x_48] @@ -444,10 +472,10 @@

    Navigation

    -
    -sat_constraints()
    -

    Return a list of variables and a list of clauses for AND operation in SAT CIPHER model.

    -

    This method support AND operation using more than two operands.

    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for AND/OR in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    See also

    SAT standard of Cipher for the format.

    @@ -460,14 +488,43 @@

    Navigation

    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
     sage: fancy = FancyBlockCipher(number_of_rounds=3)
     sage: and_component = fancy.component_from(0, 8)
    -sage: and_component.sat_constraints()
    -(['and_0_8_0',
    -  'and_0_8_1',
    -  'and_0_8_2',
    +sage: and_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['and_0_8_0_0',
    +  'and_0_8_1_0',
    +  'and_0_8_2_0',
       ...
    -  '-and_0_8_11 xor_0_7_11',
    -  '-and_0_8_11 key_23',
    -  'and_0_8_11 -xor_0_7_11 -key_23'])
    +  'and_0_8_11_0 -key_23_1',
    +  'and_0_8_11_0 -and_0_8_11_1',
    +  'xor_0_7_11_0 key_23_0 xor_0_7_11_1 key_23_1 -and_0_8_11_0'])
    +
    +
    +
    + +
    +
    +sat_constraints()
    +

    Return a list of variables and a list of clauses for OR operation in SAT CIPHER model.

    +

This method supports the OR operation with more than two operands.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.permutations.gift_permutation import GiftPermutation
    +sage: gift = GiftPermutation(number_of_rounds=3)
    +sage: or_component = gift.component_from(0, 4)
    +sage: or_component.sat_constraints()
    +(['or_0_4_0',
    +  'or_0_4_1',
    +  'or_0_4_2',
    +  ...
    +  'or_0_4_31 -xor_0_3_31',
    +  'or_0_4_31 -xor_0_1_31',
    +  '-or_0_4_31 xor_0_3_31 xor_0_1_31'])
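The three clauses produced per output bit are the usual CNF encoding of y = a OR b: two clauses force y to 1 whenever one input is 1, and the last forces y to 0 when both inputs are 0. A quick brute-force check of that encoding in plain Python (variable names are illustrative, not the CLAASP identifiers):

    # The three clauses mirror 'or_0_4_i -xor_0_3_i', 'or_0_4_i -xor_0_1_i',
    # '-or_0_4_i xor_0_3_i xor_0_1_i'.
    def clauses_hold(a, b, y):
        return bool((y or not a) and (y or not b) and (not y or a or b))

    for a in (0, 1):
        for b in (0, 1):
            for y in (0, 1):
                assert clauses_hold(a, b, y) == (y == (a | b))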
     
diff --git a/docs/build/html/components/permutation_component.html b/docs/build/html/components/permutation_component.html
index c32aa230..5fc8b7dd 100644
--- a/docs/build/html/components/permutation_component.html
+++ b/docs/build/html/components/permutation_component.html
-    Permutation component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Permutation component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Permutation component

    +

    Permutation component

    class Permutation(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, permutation_description)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for LINEAR LAYER in +SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
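As in the other examples on this page, the method returns a pair (variable identifiers, clauses), so the two parts can be unpacked directly. A small usage sketch that reuses only calls shown in this documentation; the exact counts printed depend on the component and are not asserted here:

    from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher

    fancy = FancyBlockCipher(number_of_rounds=3)
    linear_layer_component = fancy.component_from(0, 6)
    # Unpack the (variables, clauses) pair returned by the SAT truncated-differential builder.
    variable_ids, clauses = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    print(len(variable_ids), len(clauses))
    print(clauses[11])  # the same clause as shown in the example above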
    sat_constraints()
diff --git a/docs/build/html/components/reverse_component.html b/docs/build/html/components/reverse_component.html
index 99c31d6a..83303403 100644
--- a/docs/build/html/components/reverse_component.html
+++ b/docs/build/html/components/reverse_component.html
-    Reverse component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Reverse component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Reverse component

    +

    Reverse component

    class Reverse(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for LINEAR LAYER in +SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
    sat_constraints()
diff --git a/docs/build/html/components/rotate_component.html b/docs/build/html/components/rotate_component.html
index 570b5350..926cafd5 100644
--- a/docs/build/html/components/rotate_component.html
+++ b/docs/build/html/components/rotate_component.html
-    Rotate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Rotate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Rotate component

    +

    Rotate component

    class Rotate(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, parameter)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -525,6 +526,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for ROTATION in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: rotate_component = speck.component_from(1, 1)
    +sage: rotate_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['rot_1_1_0_0',
    +  'rot_1_1_1_0',
    +  'rot_1_1_2_0',
    +  ...
    +  'key_39_1 -rot_1_1_14_1',
    +  'rot_1_1_15_1 -key_40_1',
    +  'key_40_1 -rot_1_1_15_1'])
    +
    +
    +
    +
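A fixed rotation only permutes bit positions, so each clause pair of the form 'rot_1_1_15_1 -key_40_1' / 'key_40_1 -rot_1_1_15_1' simply forces the corresponding input and output indicator variables to be equal. A plain-Python check that this pair of implications encodes equality (the names x and y are illustrative):

    # (y or not x) and (x or not y) is satisfied exactly when x == y.
    for x in (0, 1):
        for y in (0, 1):
            assert bool((y or not x) and (x or not y)) == (x == y)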
    sat_constraints()
    @@ -553,11 +583,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model=None)
diff --git a/docs/build/html/components/sbox_component.html b/docs/build/html/components/sbox_component.html
index c4b1cc34..d7234cee 100644
--- a/docs/build/html/components/sbox_component.html
+++ b/docs/build/html/components/sbox_component.html
-    Sbox component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sbox component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Sbox component

    +

    Sbox component

    class SBOX(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, s_box_description)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -135,7 +136,7 @@

    Navigation

    -cp_constraints(sbox_mant)
    +cp_constraints(sbox_mant, second=False)

    Return lists of declarations and constraints for SBOX component for CP CIPHER model.

    INPUT:

      @@ -154,7 +155,7 @@

      Navigation

      -cp_deterministic_truncated_xor_differential_constraints(inverse=False)
      +cp_deterministic_truncated_xor_differential_constraints(sbox_mant, inverse=False)

      Return lists of declarations and constraints for SBOX component for CP deterministic truncated xor differential.

      INPUT:

        @@ -166,14 +167,18 @@

        Navigation

        sage: sbox_component = aes.component_from(0, 1) sage: sbox_component.cp_deterministic_truncated_xor_differential_constraints() ([], - ['constraint if xor_0_0[0] == 0 /\\ xor_0_0[1] == 0 /\\ xor_0_0[2] == 0 /\\ xor_0_0[3] == 0 /\\ xor_0_0[4] == 0 /\\ xor_0_0[5] == 0 /\\ xor_0_0[6] == 0 /\\ xor_0_0[7] then forall(i in 0..7)(sbox_0_1[i] = 0) else forall(i in 0..7)(sbox_0_1[i] = 2) endif;']) + ['constraint table(xor_0_0[0]++xor_0_0[1]++xor_0_0[2]++xor_0_0[3]++xor_0_0[4]++xor_0_0[5]++xor_0_0[6]++xor_0_0[7]++' + '[sbox_0_1[0]]++[sbox_0_1[1]]++[sbox_0_1[2]]++[sbox_0_1[3]]++[sbox_0_1[4]]++[sbox_0_1[5]]++[sbox_0_1[6]]++[sbox_0_1[7]], ' + '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,2,2,2,2,2,2' + '...' + '2,2,0,2,1,2,1,2,2,2,2,2,2,2,2,2,1,0,2,2,1,2,2,2,2,2,2,2,2,2,2,2);'])
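The change replaces the old per-bit if/else constraint with a single table constraint (MiniZinc-style syntax) whose rows enumerate the admissible truncated input/output patterns of the S-box. Conceptually, a table constraint is just set membership over tuples; a plain-Python sketch of that idea, with purely illustrative rows rather than the actual S-box table:

    # A table constraint holds when the tuple of variables matches one of the allowed rows.
    allowed_rows = {
        (0, 0, 0, 0),  # zero input difference -> zero output difference
        (1, 0, 2, 2),  # illustrative row: a known input pattern with unknown output bits
    }

    def table_constraint(assignment, rows):
        return tuple(assignment) in rows

    assert table_constraint([0, 0, 0, 0], allowed_rows)
    assert not table_constraint([1, 1, 1, 1], allowed_rows)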
    -cp_deterministic_truncated_xor_differential_trail_constraints()
    +cp_deterministic_truncated_xor_differential_trail_constraints(sbox_mant, inverse=False)
    @@ -397,7 +402,7 @@

    Navigation

    -milp_large_xor_differential_probability_constraints(binary_variable, integer_variable, non_linear_component_id)
    +milp_large_xor_differential_probability_constraints(binary_variable, integer_variable, non_linear_component_id, weight_precision=2)

Return lists of variables and constraints modeling the SBOX component, with input bit size less than or equal to 6.

    Note

    @@ -411,6 +416,7 @@

    Navigation

  • binary_variableboolean MIPVariable object

  • integer_variableboolean MIPVariable object

  • non_linear_component_idstring

  • +
  • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

  • EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    @@ -440,7 +446,7 @@ 

    Navigation

    -milp_large_xor_linear_probability_constraints(binary_variable, integer_variable, non_linear_component_id)
    +milp_large_xor_linear_probability_constraints(binary_variable, integer_variable, non_linear_component_id, weight_precision=2)

Return lists of variables and constraints modeling the SBOX component, with input bit size less than or equal to 6.

    Note

    @@ -454,6 +460,7 @@

    Navigation

  • binary_variableboolean MIPVariable object

  • integer_variableinteger MIPVariable object

  • non_linear_component_idstring

  • +
  • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

  • EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.aes_block_cipher import AESBlockCipher
    @@ -475,14 +482,14 @@ 

    Navigation

    1 - x_0 - x_1 - x_2 - x_3 - x_4 - x_5 - x_6 - x_7 <= 8 - 8*x_16, ... x_17 + x_18 + x_19 + x_20 + x_21 + x_22 + x_23 + x_24 + x_25 + x_26 + x_27 + x_28 + x_29 + x_30 + x_31 + x_32 == x_16, -x_33 == 60*x_17 + 50*x_18 + 44*x_19 + 40*x_20 + 37*x_21 + 34*x_22 + 32*x_23 + 30*x_24 + 30*x_25 + 32*x_26 + 34*x_27 + 37*x_28 + 40*x_29 + 44*x_30 + 50*x_31 + 60*x_32] +x_33 == 600*x_17 + 500*x_18 + 442*x_19 + 400*x_20 + 368*x_21 + 342*x_22 + 319*x_23 + 300*x_24 + 300*x_25 + 319*x_26 + 342*x_27 + 368*x_28 + 400*x_29 + 442*x_30 + 500*x_31 + 600*x_32]
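The new weight_precision parameter explains why the integer coefficients in these examples grew from two digits to three (for instance 60*x_17 becoming 600*x_17): the MILP objective appears to store each fractional weight rounded to weight_precision decimals and scaled up to an integer. This is an inference from the examples, not a statement of the implementation; a plain-Python sketch of that assumed scaling, with an illustrative probability p:

    from math import log2

    def scaled_weight(p, weight_precision=2):
        # Round -log2(p) to `weight_precision` decimals, then scale it to an integer
        # coefficient usable in a MILP objective (assumed behaviour, for illustration).
        return round(-log2(p) * 10 ** weight_precision)

    print(scaled_weight(2 ** -6))     # 600 -- same scale as the 600*x_17 coefficient above
    print(scaled_weight(2 ** -6, 1))  # 60  -- same scale as the previous 60*x_17 coefficient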
    -milp_small_xor_differential_probability_constraints(binary_variable, integer_variable, non_linear_component_id)
    +milp_small_xor_differential_probability_constraints(binary_variable, integer_variable, non_linear_component_id, weight_precision=2)

Return a list of variables and a list of constraints modeling a component of type SBOX.

    Note

    @@ -496,6 +503,7 @@

    Navigation

  • binary_variableboolean MIPVariable object

  • integer_variableinteger MIPVariable object

  • non_linear_component_idstring

  • +
  • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

  • EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.present_block_cipher import PresentBlockCipher
    @@ -524,7 +532,7 @@ 

    Navigation

    -milp_small_xor_linear_probability_constraints(binary_variable, integer_variable, non_linear_component_id)
    +milp_small_xor_linear_probability_constraints(binary_variable, integer_variable, non_linear_component_id, weight_precision=2)

Return a list of variables and a list of constraints modeling a component of type Sbox.

    Note

    @@ -539,6 +547,7 @@

    Navigation

  • binary_variableMIPVariable object

  • integer_variableMIPVariable object

  • non_linear_component_idlist

  • +
  • weight_precisioninteger (default: 2); the number of decimals to use when rounding the weight of the trail.

  • EXAMPLES:

    sage: from claasp.ciphers.block_ciphers.present_block_cipher import PresentBlockCipher
    @@ -560,7 +569,7 @@ 

    Navigation

    x_0 <= x_8, ... x_9 + x_10 + x_11 + x_12 == x_8, -x_13 == 20*x_9 + 10*x_10 + 10*x_11 + 20*x_12] +x_13 == 200*x_9 + 100*x_10 + 100*x_11 + 200*x_12]
    @@ -750,7 +759,7 @@

    Navigation

    x_0 <= x_8, ... x_9 + x_10 + x_11 + x_12 == x_8, -x_13 == 20*x_9 + 10*x_10 + 10*x_11 + 20*x_12] +x_13 == 200*x_9 + 100*x_10 + 100*x_11 + 200*x_12]
    @@ -785,6 +794,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for a generic S-BOX in SAT deterministic truncated XOR DIFFERENTIAL model.

    +

    INPUT:

    +
      +
    • modelmodel object; a model instance

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.present_block_cipher import PresentBlockCipher
    +sage: present = PresentBlockCipher(number_of_rounds=3)
    +sage: sbox_component = present.component_from(0, 2)
    +sage: sbox_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['sbox_0_2_0_0',
    +  'sbox_0_2_1_0',
    +  'sbox_0_2_2_0',
    +  ...
    +  '-xor_0_0_6_0 sbox_0_2_3_0',
    +  '-xor_0_0_5_0 sbox_0_2_3_0',
    +  '-xor_0_0_4_0 sbox_0_2_3_0'])
    +
    +
    +
    +
    sat_constraints()
    @@ -1010,7 +1043,12 @@

    Navigation

    -milp_large_xor_probability_constraint_for_inequality(M, component_id, ineq, input_vars, output_vars, proba, sbox_input_size, x)
    +milp_large_xor_probability_constraint_for_inequality(M, component_id, ineq, input_vars, output_vars, proba, sbox_input_size, sbox_output_size, x) +
    + +
    +
    +milp_set_constraints_from_dictionnary_for_large_sbox(component_id, input_vars, output_vars, sbox_input_size, sbox_output_size, x, p, probability_dictionary, analysis, weight_precision)
diff --git a/docs/build/html/components/shift_component.html b/docs/build/html/components/shift_component.html
index df5ace14..91ebc9c3 100644
--- a/docs/build/html/components/shift_component.html
+++ b/docs/build/html/components/shift_component.html
-    Shift component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Shift component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • Navigation -
  • + @@ -56,11 +57,11 @@

    Navigation

    -

    Shift component

    +

    Shift component

    class SHIFT(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, parameter)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -535,6 +536,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for SHIFT in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.tea_block_cipher import TeaBlockCipher
    +sage: tea = TeaBlockCipher(number_of_rounds=3)
    +sage: shift_component = tea.component_from(0, 0)
    +sage: shift_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['shift_0_0_0_0',
    +  'shift_0_0_1_0',
    +  'shift_0_0_2_0',
    +  ...
    +  '-shift_0_0_30_1',
    +  '-shift_0_0_31_0',
    +  '-shift_0_0_31_1'])
    +
    +
    +
    +
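The trailing unit clauses '-shift_0_0_31_0' and '-shift_0_0_31_1' are consistent with the vacated positions of a fixed logical shift being forced to a known zero difference, while the remaining positions are only re-indexed, as with rotation. A plain-Python sketch of that bit-level behaviour; this is not CLAASP code, and the word size and shift amount are illustrative:

    def shift_left(bits, amount):
        # Logical left shift on an msb-first list of bits:
        # drop the leading bits, zero-fill the vacated tail positions.
        return bits[amount:] + [0] * amount

    assert shift_left([1, 0, 1, 1], 2) == [1, 1, 0, 0]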
    sat_constraints()
    @@ -563,11 +593,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model=None)
diff --git a/docs/build/html/components/shift_rows_component.html b/docs/build/html/components/shift_rows_component.html
index dff0b95d..7fe61417 100644
--- a/docs/build/html/components/shift_rows_component.html
+++ b/docs/build/html/components/shift_rows_component.html
-    Shift rows component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Shift rows component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Shift rows component

    +

    Shift rows component

    class ShiftRows(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, parameter)
    -

    Bases: claasp.components.rotate_component.Rotate

    +

    Bases: Rotate

    algebraic_polynomials(model)
    @@ -525,6 +526,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for ROTATION in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: rotate_component = speck.component_from(1, 1)
    +sage: rotate_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['rot_1_1_0_0',
    +  'rot_1_1_1_0',
    +  'rot_1_1_2_0',
    +  ...
    +  'key_39_1 -rot_1_1_14_1',
    +  'rot_1_1_15_1 -key_40_1',
    +  'key_40_1 -rot_1_1_15_1'])
    +
    +
    +
    +
    sat_constraints()
    @@ -553,11 +583,6 @@

    Navigation

    -
    -
    -sat_deterministic_truncated_xor_differential_trail_constraints()
    -
    -
    sat_xor_differential_propagation_constraints(model=None)
diff --git a/docs/build/html/components/sigma_component.html b/docs/build/html/components/sigma_component.html
index 9ae55ac2..fd23ca03 100644
--- a/docs/build/html/components/sigma_component.html
+++ b/docs/build/html/components/sigma_component.html
-    Sigma component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Sigma component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Sigma component

    +

    Sigma component

    class Sigma(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, rotation_amounts_parameter)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for LINEAR LAYER in +SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
    sat_constraints()
    @@ -647,13 +672,13 @@

    Navigation

    This Page

    @@ -671,7 +696,7 @@

    Quick search

    - +
    @@ -686,10 +711,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -697,7 +722,7 @@

    Navigation

    - + diff --git a/docs/build/html/components/theta_keccak_component.html b/docs/build/html/components/theta_keccak_component.html index f3baedde..aca7cc3a 100644 --- a/docs/build/html/components/theta_keccak_component.html +++ b/docs/build/html/components/theta_keccak_component.html @@ -1,23 +1,24 @@ - + - Theta keccak component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Theta keccak component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Theta keccak component

    +

    Theta keccak component

    class ThetaKeccak(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for LINEAR LAYER in +SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
    sat_constraints()
diff --git a/docs/build/html/components/theta_xoodoo_component.html b/docs/build/html/components/theta_xoodoo_component.html
index 56ca55da..3e2a263b 100644
--- a/docs/build/html/components/theta_xoodoo_component.html
+++ b/docs/build/html/components/theta_xoodoo_component.html
-    Theta xoodoo component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Theta xoodoo component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Theta xoodoo component

    +

    Theta xoodoo component

    class ThetaXoodoo(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.components.linear_layer_component.LinearLayer

    +

    Bases: LinearLayer

    algebraic_polynomials(model)
    @@ -473,6 +474,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for LINEAR LAYER in +SAT DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.fancy_block_cipher import FancyBlockCipher
    +sage: fancy = FancyBlockCipher(number_of_rounds=3)
    +sage: linear_layer_component = fancy.component_from(0, 6)
    +sage: constraints = linear_layer_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[1][11]
    +'inter_0_linear_layer_0_6_0_1 inter_1_linear_layer_0_6_0_0 inter_1_linear_layer_0_6_0_1 -sbox_0_1_0_1'
    +
    +
    +
    +
    sat_constraints()
diff --git a/docs/build/html/components/variable_rotate_component.html b/docs/build/html/components/variable_rotate_component.html
index e5b8b21a..bc094a0b 100644
--- a/docs/build/html/components/variable_rotate_component.html
+++ b/docs/build/html/components/variable_rotate_component.html
-    Variable rotate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Variable rotate component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Variable rotate component

    +

    Variable rotate component

    class VariableRotate(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, parameter)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -209,13 +210,13 @@

    Navigation

    This Page

    @@ -233,7 +234,7 @@

    Quick search

    - +
    @@ -248,10 +249,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -259,7 +260,7 @@

    Navigation

    - + diff --git a/docs/build/html/components/variable_shift_component.html b/docs/build/html/components/variable_shift_component.html index f5806bdc..fae30774 100644 --- a/docs/build/html/components/variable_shift_component.html +++ b/docs/build/html/components/variable_shift_component.html @@ -1,23 +1,24 @@ - + - Variable shift component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Variable shift component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Variable shift component

    +

    Variable shift component

    class VariableShift(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, parameter)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    as_python_dictionary()
    @@ -347,13 +348,13 @@

    Navigation

    This Page

    @@ -371,7 +372,7 @@

    Quick search

    - +
    @@ -386,10 +387,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -397,7 +398,7 @@

    Navigation

    - + diff --git a/docs/build/html/components/word_permutation_component.html b/docs/build/html/components/word_permutation_component.html index 61297224..947615a4 100644 --- a/docs/build/html/components/word_permutation_component.html +++ b/docs/build/html/components/word_permutation_component.html @@ -1,23 +1,24 @@ - + - Word permutation component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation + Word permutation component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation + - - + + @@ -33,10 +34,10 @@

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Word permutation component

    +

    Word permutation component

    class WordPermutation(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size, permutation_description, word_size)
    -

    Bases: claasp.components.mix_column_component.MixColumn

    +

    Bases: MixColumn

    algebraic_polynomials(model)
    @@ -509,6 +510,30 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for MIX COLUMN in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.midori_block_cipher import MidoriBlockCipher
    +sage: midori = MidoriBlockCipher(number_of_rounds=3)
    +sage: mix_column_component = midori.component_from(0, 23)
    +sage: out_ids, constraints = mix_column_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +sage: constraints[7]
    +'mix_column_0_23_0_0 -inter_0_mix_column_0_23_0_0'
    +
    +
    +
    +
    sat_constraints()
diff --git a/docs/build/html/components/xor_component.html b/docs/build/html/components/xor_component.html
index 95c3b5ff..f2ef6523 100644
--- a/docs/build/html/components/xor_component.html
+++ b/docs/build/html/components/xor_component.html
-    Xor component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Xor component — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,11 +57,11 @@

    Navigation

    -

    Xor component

    +

    Xor component

    class XOR(current_round_number, current_round_number_of_components, input_id_links, input_bit_positions, output_bit_size)
    -

    Bases: claasp.component.Component

    +

    Bases: Component

    algebraic_polynomials(model)
    @@ -665,6 +666,35 @@

    Navigation

    print_word_values(code)
    +
    +
    +sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +

    Return a list of variables and a list of clauses for XOR in SAT +DETERMINISTIC TRUNCATED XOR DIFFERENTIAL model.

    +
    +

    See also

    +

    SAT standard of Cipher for the format.

    +
    +

    INPUT:

    +
      +
    • None

    • +
    +

    EXAMPLES:

    +
    sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher
    +sage: speck = SpeckBlockCipher(number_of_rounds=3)
    +sage: xor_component = speck.component_from(0, 2)
    +sage: xor_component.sat_bitwise_deterministic_truncated_xor_differential_constraints()
    +(['xor_0_2_0_0',
    +  'xor_0_2_1_0',
    +  'xor_0_2_2_0',
    +  ...
    +  'modadd_0_1_15_1 xor_0_2_15_0 xor_0_2_15_1 -key_63_1',
    +  'key_63_1 xor_0_2_15_0 xor_0_2_15_1 -modadd_0_1_15_1',
    +  'xor_0_2_15_0 -modadd_0_1_15_1 -key_63_1 -xor_0_2_15_1'])
    +
    +
    +
    +
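For context, the usual bitwise rule behind a deterministic truncated XOR differential model is: if either input difference bit is unknown, the output bit is unknown; otherwise the output difference is the XOR of the inputs. The sketch below is a conceptual, plain-Python illustration of that rule with 2 standing for "unknown"; it is not the CLAASP encoding, which the paired variable names such as xor_0_2_15_0 / xor_0_2_15_1 suggest is built from two SAT variables per bit position:

    UNKNOWN = 2

    def truncated_xor(a, b):
        # Deterministic truncated XOR propagation on one bit: 0/1 are known
        # difference values, UNKNOWN (2) means the difference cannot be determined.
        if a == UNKNOWN or b == UNKNOWN:
            return UNKNOWN
        return a ^ b

    assert truncated_xor(1, 0) == 1
    assert truncated_xor(1, 1) == 0
    assert truncated_xor(UNKNOWN, 0) == UNKNOWN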
    sat_constraints()
diff --git a/docs/build/html/compound_xor_differential_cipher.html b/docs/build/html/compound_xor_differential_cipher.html
index 77ea9111..3391b33f 100644
--- a/docs/build/html/compound_xor_differential_cipher.html
+++ b/docs/build/html/compound_xor_differential_cipher.html
-    Compound xor differential cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Compound xor differential cipher — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Compound xor differential cipher

    +

    Compound xor differential cipher

    convert_to_compound_xor_cipher(cipher)
diff --git a/docs/build/html/editor.html b/docs/build/html/editor.html
index 59e21611..e871b3bb 100644
--- a/docs/build/html/editor.html
+++ b/docs/build/html/editor.html
-    Editor — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Editor — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    modules |
  • - next |
  • - previous |
  • @@ -44,7 +45,7 @@

    Navigation

    - + @@ -56,7 +57,7 @@

    Navigation

    -

    Editor

    +

    Editor

    add_AND_component(cipher, input_id_links, input_bit_positions, output_bit_size)
diff --git a/docs/build/html/genindex-A.html b/docs/build/html/genindex-A.html
index ed486236..60e5ccb9 100644
--- a/docs/build/html/genindex-A.html
+++ b/docs/build/html/genindex-A.html
-    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Index – A

diff --git a/docs/build/html/genindex-B.html b/docs/build/html/genindex-B.html
index 038644d7..8ded8c2a 100644
--- a/docs/build/html/genindex-B.html
+++ b/docs/build/html/genindex-B.html
-    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    - + @@ -156,10 +157,14 @@

    Index – B

  • build_all_xor_differential_trails_with_fixed_weight() (MinizincXorDifferentialModel method)
  • -
  • build_bitwise_deterministic_truncated_xor_differential_trail_model() (MilpBitwiseDeterministicTruncatedXorDifferentialModel method) +
  • build_bitwise_deterministic_truncated_xor_differential_trail_model() (CmsSatDeterministicTruncatedXorDifferentialModel method)
  • build_bitwise_impossible_xor_differential_trail_model() (MilpBitwiseImpossibleXorDifferentialModel method) @@ -180,26 +185,30 @@

    Index – B

  • build_code_for_components() (in module cipher_modules.code_generator)
  • - - + @@ -52,12 +53,12 @@

    Index – C

    -
  • clock_fsm() (Snow3GStreamCipher method) -
  • CmsSatCipherModel (class in cipher_modules.models.sat.cms_models.cms_cipher_model)
  • -
  • CmsSatDeterministicTruncatedXorDifferentialModel (class in cipher_modules.models.sat.cms_models.cms_deterministic_truncated_xor_differential_model) +
  • CmsSatDeterministicTruncatedXorDifferentialModel (class in cipher_modules.models.sat.cms_models.cms_bitwise_deterministic_truncated_xor_differential_model)
  • CmsSatXorDifferentialModel (class in cipher_modules.models.sat.cms_models.cms_xor_differential_model)
  • @@ -1769,9 +1905,9 @@

    Index – C

  • cnf_xor_seq() (in module cipher_modules.models.sat.utils.utils)
  • -
  • collect_component_operations() (in module cipher_modules.component_analysis_tests) +
  • cnf_xor_truncated() (in module cipher_modules.models.sat.utils.utils)
  • -
  • collect_components_with_the_same_operation() (in module cipher_modules.component_analysis_tests) +
  • cnf_xor_truncated_seq() (in module cipher_modules.models.sat.utils.utils)
  • collect_input_id_links() (FancyBlockCipher method)
  • @@ -1790,143 +1926,13 @@

    Index – C

  • Component (class in component)
  • -
  • component_analysis_tests() (A51StreamCipher method) - -
  • component_from() (A51StreamCipher method)
diff --git a/docs/build/html/genindex-F.html b/docs/build/html/genindex-F.html
index 3eb85e48..d822be79 100644
--- a/docs/build/html/genindex-F.html
+++ b/docs/build/html/genindex-F.html
-    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives 1.1.0 documentation
+    Index — CLAASP: Cryptographic Library for Automated Analysis of Symmetric Primitives v2.5.0 documentation

    Navigation

    - + @@ -55,10 +56,14 @@

    Index – F

  • F() (MD5HashFunction method)
  • f() (SimonBlockCipher method) +
  • +
  • F_function() (SCARFBlockCipher method)
  • family_name (A51StreamCipher property)
  • find_one_wordwise_impossible_xor_differential_trail() (MilpWordwiseImpossibleXorDifferentialModel method)
  • -
  • find_one_wordwise_impossible_xor_differential_trail_with_fixed_component() (MilpWordwiseImpossibleXorDifferentialModel method) +
  • find_one_wordwise_impossible_xor_differential_trail_with_chosen_components() (MilpWordwiseImpossibleXorDifferentialModel method)
  • find_one_wordwise_impossible_xor_differential_trail_with_fully_automatic_model() (MilpWordwiseImpossibleXorDifferentialModel method)
  • @@ -820,6 +755,8 @@

    Index – F