From f673a52fa32a113fd47e1c4d3ec87adeb3a81b9a Mon Sep 17 00:00:00 2001 From: matteosz Date: Sat, 23 Mar 2024 17:26:32 +0100 Subject: [PATCH] Switched to uint32 for idx and th --- changelog.md | 23 ------- encoding.go | 8 +-- examples/dkg_test.go | 18 +++--- group/edwards25519/fe.go | 76 +++++++++++------------ group/edwards25519/ge.go | 3 +- group/edwards25519/ge_mult_vartime.go | 2 +- group/mod/int.go | 16 ++--- proof/deniable.go | 12 ++-- proof/deniable_test.go | 10 +-- proof/hash_test.go | 4 +- proof/proof.go | 43 +++++++------ proof/proof_test.go | 4 +- share/dkg/pedersen/dkg.go | 24 ++++---- share/dkg/pedersen/dkg_test.go | 28 ++++----- share/dkg/rabin/dkg_test.go | 12 ++-- share/poly.go | 48 +++++++-------- share/poly_test.go | 88 +++++++++++++-------------- share/pvss/pvss.go | 8 +-- share/pvss/pvss_test.go | 30 ++++----- share/vss/pedersen/vss.go | 22 +++---- share/vss/pedersen/vss_test.go | 60 +++++++++--------- share/vss/rabin/vss.go | 28 ++++----- share/vss/rabin/vss_test.go | 66 ++++++++++---------- shuffle/biffle.go | 6 +- shuffle/pair.go | 26 ++++---- sign/cosi/cosi.go | 20 +++--- sign/cosi/cosi_test.go | 4 +- sign/dss/dss.go | 14 ++--- sign/dss/dss_test.go | 12 ++-- sign/policy.go | 12 ++-- sign/policy_test.go | 8 +-- sign/tbls/tbls.go | 6 +- sign/tbls/tbls_test.go | 2 +- 33 files changed, 358 insertions(+), 385 deletions(-) delete mode 100644 changelog.md diff --git a/changelog.md b/changelog.md deleted file mode 100644 index 3247dbd53..000000000 --- a/changelog.md +++ /dev/null @@ -1,23 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased - v4] - -### Added - -- Added unit tests to increase test coverage for the packages util/encoding and util/encryption -- Added fuzzer tests for sign/tbls and sign/cosi packages -- Added two implementations for bls12-381 (circl and kilic) and benchmarks for comparing them -- Added some more benchmarks for the ecies, bn256 and proof modules - -### Changed - -- Migrated to sized integers for fields in structures to have x-compatibility and solve compilation on 32-bit architectures - -### Removed - -- \ No newline at end of file diff --git a/encoding.go b/encoding.go index 02cb4fbcd..3b927733d 100644 --- a/encoding.go +++ b/encoding.go @@ -36,10 +36,10 @@ type Marshaling interface { // will have different constraints, of course. Two implementations are // available: // -// 1. The protobuf encoding using the variable length Google Protobuf encoding -// scheme. The library is available at https://go.dedis.ch/protobuf -// 2. The fixbuf encoding, a fixed length binary encoding of arbitrary -// structures. The library is available at https://go.dedis.ch/fixbuf. +// 1. The protobuf encoding using the variable length Google Protobuf encoding +// scheme. The library is available at https://go.dedis.ch/protobuf +// 2. The fixbuf encoding, a fixed length binary encoding of arbitrary +// structures. The library is available at https://go.dedis.ch/fixbuf. type Encoding interface { // Encode and write objects to an io.Writer. 
Write(w io.Writer, objs ...interface{}) error diff --git a/examples/dkg_test.go b/examples/dkg_test.go index 8c2bf2eeb..d88d465a8 100644 --- a/examples/dkg_test.go +++ b/examples/dkg_test.go @@ -39,8 +39,8 @@ func Test_Example_DKG(t *testing.T) { // default number of node for this test nStr = "7" } - nUnszd, err := strconv.Atoi(nStr) - n := uint32(nUnszd) + nUnsz, err := strconv.Atoi(nStr) + n := uint32(nUnsz) require.NoError(t, err) type node struct { @@ -70,7 +70,7 @@ func Test_Example_DKG(t *testing.T) { // 2. Create the DKGs on each node for i, node := range nodes { - dkg, err := dkg.NewDistKeyGenerator(suite, nodes[i].privKey, pubKeys, uint32(n)) + dkg, err := dkg.NewDistKeyGenerator(suite, nodes[i].privKey, pubKeys, n) require.NoError(t, err) node.dkg = dkg } @@ -112,8 +112,8 @@ func Test_Example_DKG(t *testing.T) { // 6. Check and print the qualified shares for _, node := range nodes { require.True(t, node.dkg.Certified()) - require.Equal(t, n, len(node.dkg.QualifiedShares())) - require.Equal(t, n, len(node.dkg.QUAL())) + require.Equal(t, n, uint32(len(node.dkg.QualifiedShares()))) + require.Equal(t, n, uint32(len(node.dkg.QUAL()))) t.Log("qualified shares:", node.dkg.QualifiedShares()) t.Log("QUAL", node.dkg.QUAL()) } @@ -154,7 +154,7 @@ func Test_Example_DKG(t *testing.T) { S := suite.Point().Mul(node.secretShare.V, K) partials[i] = suite.Point().Sub(C, S) pubShares[i] = &share.PubShare{ - I: int32(i), V: partials[i], + I: uint32(i), V: partials[i], } } @@ -221,7 +221,7 @@ func Test_Example_DKG(t *testing.T) { ) partials[i] = v pubShares[i] = &share.PubShare{ - I: int32(i), V: partials[i], + I: uint32(i), V: partials[i], } } @@ -252,8 +252,8 @@ func Test_Example_DKG(t *testing.T) { OldNodes: pubKeys, NewNodes: pubKeys, Share: share, - Threshold: uint32(n), - OldThreshold: uint32(n), + Threshold: n, + OldThreshold: n, } newDkg, err := dkg.NewDistKeyHandler(c) require.NoError(t, err) diff --git a/group/edwards25519/fe.go b/group/edwards25519/fe.go index 7f48372e2..53565ad0b 100644 --- a/group/edwards25519/fe.go +++ b/group/edwards25519/fe.go @@ -135,29 +135,27 @@ func feFromBytes(dst *fieldElement, src []byte) { // feToBytes marshals h to s. // Preconditions: -// -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. // // Write p=2^255-19; q=floor(h/p). // Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). // // Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. // -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0= 0; i-- { + for i = 62; i >= 0; i-- { // t <<= 4 t.ToProjective(&r) diff --git a/group/edwards25519/ge_mult_vartime.go b/group/edwards25519/ge_mult_vartime.go index e4f579d5c..9ddd61fdf 100644 --- a/group/edwards25519/ge_mult_vartime.go +++ b/group/edwards25519/ge_mult_vartime.go @@ -14,7 +14,7 @@ func geScalarMultVartime(h *extendedGroupElement, a *[32]byte, var t completedGroupElement var u, A2 extendedGroupElement var r projectiveGroupElement - var i int32 + var i int // Slide through the scalar exponent clumping sequences of bits, // resulting in only zero or odd multipliers between -15 and 15. 
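Note: a minimal sketch of the calling convention the example test above now follows, with counts converted to uint32 once at the boundary and share.PubShare indexed by uint32. The v4 module import path and the literal node count are assumptions for illustration only.

package main

import (
	"fmt"
	"strconv"

	"go.dedis.ch/kyber/v4/group/edwards25519" // import path assumed for the v4 line
	"go.dedis.ch/kyber/v4/share"
)

func main() {
	suite := edwards25519.NewBlakeSHA256Ed25519()

	// Parse the node count as a plain int, then convert once to uint32.
	nUnsz, err := strconv.Atoi("7") // e.g. taken from an environment variable
	if err != nil {
		panic(err)
	}
	n := uint32(nUnsz)

	// Public shares are now indexed by uint32, matching share.PubShare.I.
	pubShares := make([]*share.PubShare, n)
	for i := uint32(0); i < n; i++ {
		pubShares[i] = &share.PubShare{I: i, V: suite.Point().Base()}
	}
	fmt.Println(len(pubShares))
}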
diff --git a/group/mod/int.go b/group/mod/int.go index cf973ae51..ba898f2fd 100644 --- a/group/mod/int.go +++ b/group/mod/int.go @@ -313,9 +313,9 @@ func (i *Int) MarshalSize() int { // MarshalBinary encodes the value of this Int into a byte-slice exactly Len() bytes long. // It uses i's ByteOrder to determine which byte order to output. func (i *Int) MarshalBinary() ([]byte, error) { - l := int64(i.MarshalSize()) + l := i.MarshalSize() b := i.V.Bytes() // may be shorter than l - offset := l - int64(len(b)) + offset := l - len(b) if i.BO == LittleEndian { return i.LittleEndian(l, l), nil @@ -365,9 +365,9 @@ func (i *Int) UnmarshalFrom(r io.Reader) (int, error) { // BigEndian encodes the value of this Int into a big-endian byte-slice // at least min bytes but no more than max bytes long. // Panics if max != 0 and the Int cannot be represented in max bytes. -func (i *Int) BigEndian(min, max int64) []byte { - act := int64(i.MarshalSize()) - pad, ofs := act, int64(0) +func (i *Int) BigEndian(min, max int) []byte { + act := i.MarshalSize() + pad, ofs := act, 0 if pad < min { pad, ofs = min, min-act } @@ -394,10 +394,10 @@ func (i *Int) SetBytes(a []byte) kyber.Scalar { // LittleEndian encodes the value of this Int into a little-endian byte-slice // at least min bytes but no more than max bytes long. // Panics if max != 0 and the Int cannot be represented in max bytes. -func (i *Int) LittleEndian(min, max int64) []byte { - act := int64(i.MarshalSize()) +func (i *Int) LittleEndian(min, max int) []byte { + act := i.MarshalSize() vBytes := i.V.Bytes() - vSize := int64(len(vBytes)) + vSize := len(vBytes) if vSize < act { act = vSize } diff --git a/proof/deniable.go b/proof/deniable.go index ac8ccfdb9..9da392eaa 100644 --- a/proof/deniable.go +++ b/proof/deniable.go @@ -14,7 +14,7 @@ import ( // the Sigma-protocol proofs of any or all of the other participants. // Different participants may produce different proofs of varying sizes, // and may even consist of different numbers of steps. 
-func DeniableProver(suite Suite, self int64, prover Prover, +func DeniableProver(suite Suite, self uint32, prover Prover, verifiers []Verifier) Protocol { return Protocol(func(ctx Context) []error { @@ -25,7 +25,7 @@ func DeniableProver(suite Suite, self int64, prover Prover, type deniableProver struct { suite Suite // Agreed-on ciphersuite for protocol - self int64 // Our own node number + self uint32 // Our own node number sc Context // Clique protocol context // verifiers for other nodes' proofs @@ -43,14 +43,14 @@ type deniableProver struct { err []error } -func (dp *deniableProver) run(suite Suite, self int64, prv Prover, +func (dp *deniableProver) run(suite Suite, self uint32, prv Prover, vrf []Verifier, sc Context) []error { dp.suite = suite dp.self = self dp.sc = sc dp.prirand = sc.Random() - nnodes := int64(len(vrf)) + nnodes := uint32(len(vrf)) if self < 0 || self >= nnodes { return []error{errors.New("out-of-range self node")} } @@ -60,7 +60,7 @@ func (dp *deniableProver) run(suite Suite, self int64, prv Prover, verr := errors.New("prover or verifier not run") dp.err = make([]error, nnodes) for i := range dp.err { - if int64(i) != self { + if uint32(i) != self { dp.err[i] = verr } } @@ -187,7 +187,7 @@ func (dp *deniableProver) challengeStep() error { mix[j] ^= key[j] } } - if int64(len(keys)) <= dp.self || !bytes.Equal(keys[dp.self], dp.key) { + if uint32(len(keys)) <= dp.self || !bytes.Equal(keys[dp.self], dp.key) { return errors.New("our own message was corrupted") } diff --git a/proof/deniable_test.go b/proof/deniable_test.go index c625b4a54..693ac53d7 100644 --- a/proof/deniable_test.go +++ b/proof/deniable_test.go @@ -13,7 +13,7 @@ import ( var testSuite = edwards25519.NewBlakeSHA256Ed25519() type node struct { - i int64 + i int done bool x kyber.Scalar @@ -54,7 +54,7 @@ func runNode(n *node) { } func TestDeniable(t *testing.T) { - nnodes := int64(5) + nnodes := 5 suite := testSuite rand := random.New() @@ -62,7 +62,7 @@ func TestDeniable(t *testing.T) { // Make some keypairs nodes := make([]*node, nnodes) - for i := int64(0); i < nnodes; i++ { + for i := 0; i < nnodes; i++ { n := &node{} nodes[i] = n n.i = i @@ -72,7 +72,7 @@ func TestDeniable(t *testing.T) { } // Make some provers and verifiers - for i := int64(0); i < nnodes; i++ { + for i := 0; i < nnodes; i++ { n := nodes[i] pred := Rep("X", "x", "B") sval := map[string]kyber.Scalar{"x": n.x} @@ -85,7 +85,7 @@ func TestDeniable(t *testing.T) { vpval := map[string]kyber.Point{"B": B, "X": nodes[vi].X} vrfs[vi] = vpred.Verifier(suite, vpval) - n.proto = DeniableProver(suite, i, prover, vrfs) + n.proto = DeniableProver(suite, uint32(i), prover, vrfs) n.outbox = make(chan []byte) n.inbox = make(chan [][]byte) diff --git a/proof/hash_test.go b/proof/hash_test.go index fb2dbd75f..caf658388 100644 --- a/proof/hash_test.go +++ b/proof/hash_test.go @@ -89,7 +89,7 @@ func Example_hashProve2() { } // Make just one of them an actual public/private keypair (X[mine],x) - mine := int64(2) // only the signer knows this + mine := int32(2) // only the signer knows this x := suite.Scalar().Pick(suite.RandomStream()) // create a private key x X[mine] = suite.Point().Mul(x, nil) // corresponding public key X @@ -116,7 +116,7 @@ func Example_hashProve2() { fmt.Printf("Linkable Ring Signature Predicate:\n%s\n", pred.String()) // The prover needs to know which Or branch (mine) is actually true. 
- choice := make(map[Predicate]int64) + choice := make(map[Predicate]int32) choice[pred] = mine // Generate the signature diff --git a/proof/proof.go b/proof/proof.go index 0b2f7e61e..8fdef55cd 100644 --- a/proof/proof.go +++ b/proof/proof.go @@ -63,7 +63,7 @@ type Predicate interface { // Create a Prover proving the statement this Predicate represents. Prover(suite Suite, secrets map[string]kyber.Scalar, - points map[string]kyber.Point, choice map[Predicate]int64) Prover + points map[string]kyber.Point, choice map[Predicate]int32) Prover // Create a Verifier for the statement this Predicate represents. Verifier(suite Suite, points map[string]kyber.Point) Verifier @@ -103,17 +103,17 @@ const ( type proof struct { s Suite - nsvars int64 // number of Scalar variables - npvars int64 // number of Point variables - svar, pvar []string // Scalar and Point variable names - sidx, pidx map[string]int64 // Maps from strings to variable indexes + nsvars uint32 // number of Scalar variables + npvars uint32 // number of Point variables + svar, pvar []string // Scalar and Point variable names + sidx, pidx map[string]uint32 // Maps from strings to variable indexes pval map[string]kyber.Point // values of public Point variables // prover-specific state pc ProverContext sval map[string]kyber.Scalar // values of private Scalar variables - choice map[Predicate]int64 // OR branch choices set by caller + choice map[Predicate]int32 // OR branch choices set by caller pp map[Predicate]*proverPred // per-predicate prover state // verifier-specific state @@ -317,7 +317,7 @@ func (rp *repPred) verify(prf *proof, c kyber.Scalar, pr []kyber.Scalar) error { func (rp *repPred) Prover(suite Suite, secrets map[string]kyber.Scalar, points map[string]kyber.Point, - choice map[Predicate]int64) Prover { + choice map[Predicate]int32) Prover { return proof{}.init(suite, rp).prover(rp, secrets, points, choice) } @@ -427,7 +427,7 @@ func (ap *andPred) verify(prf *proof, c kyber.Scalar, pr []kyber.Scalar) error { func (ap *andPred) Prover(suite Suite, secrets map[string]kyber.Scalar, points map[string]kyber.Point, - choice map[Predicate]int64) Prover { + choice map[Predicate]int32) Prover { return proof{}.init(suite, ap).prover(ap, secrets, points, choice) } @@ -488,13 +488,12 @@ func (op *orPred) commit(prf *proof, w kyber.Scalar, pv []kyber.Scalar) error { // We're on a proof-obligated branch; // choose random pre-challenges for only non-obligated subs. 
choice, ok := prf.choice[op] - len := int64(len(sub)) - if !ok || choice < 0 || choice >= len { + if !ok || choice < 0 || choice >= int32(len(sub)) { return errors.New("no choice of proof branch for OR-predicate " + op.String()) } - for i := int64(0); i < len; i++ { - if i != choice { + for i := 0; i < len(sub); i++ { + if int32(i) != choice { wi[i] = prf.s.Scalar() prf.pc.PriRand(wi[i]) } // else wi[i] == nil for proof-obligated sub @@ -537,7 +536,7 @@ func (op *orPred) respond(prf *proof, c kyber.Scalar, pr []kyber.Scalar) error { cs := prf.s.Scalar().Set(c) choice := prf.choice[op] for i := 0; i < len(sub); i++ { - if int64(i) != choice { + if int32(i) != choice { cs.Sub(cs, ci[i]) } } @@ -611,7 +610,7 @@ func (op *orPred) verify(prf *proof, c kyber.Scalar, pr []kyber.Scalar) error { func (op *orPred) Prover(suite Suite, secrets map[string]kyber.Scalar, points map[string]kyber.Point, - choice map[Predicate]int64) Prover { + choice map[Predicate]int32) Prover { return proof{}.init(suite, op).prover(op, secrets, points, choice) } @@ -642,25 +641,25 @@ func (prf proof) init(suite Suite, pred Predicate) *proof { // Reserve variable index 0 for convenience. prf.svar = []string{""} prf.pvar = []string{""} - prf.sidx = make(map[string]int64) - prf.pidx = make(map[string]int64) + prf.sidx = make(map[string]uint32) + prf.pidx = make(map[string]uint32) pred.enumVars(&prf) - prf.nsvars = int64(len(prf.svar)) - prf.npvars = int64(len(prf.pvar)) + prf.nsvars = uint32(len(prf.svar)) + prf.npvars = uint32(len(prf.pvar)) return &prf } func (prf *proof) enumScalarVar(name string) { if prf.sidx[name] == 0 { - prf.sidx[name] = int64(len(prf.svar)) + prf.sidx[name] = uint32(len(prf.svar)) prf.svar = append(prf.svar, name) } } func (prf *proof) enumPointVar(name string) { if prf.pidx[name] == 0 { - prf.pidx[name] = int64(len(prf.pvar)) + prf.pidx[name] = uint32(len(prf.pvar)) prf.pvar = append(prf.pvar, name) } } @@ -706,7 +705,7 @@ func (prf *proof) getResponses(pr []kyber.Scalar, r []kyber.Scalar) error { func (prf *proof) prove(p Predicate, sval map[string]kyber.Scalar, pval map[string]kyber.Point, - choice map[Predicate]int64, pc ProverContext) error { + choice map[Predicate]int32, pc ProverContext) error { prf.pc = pc prf.sval = sval prf.pval = pval @@ -753,7 +752,7 @@ func (prf *proof) verify(p Predicate, pval map[string]kyber.Point, // Produce a higher-order Prover embodying a given proof predicate. func (prf *proof) prover(p Predicate, sval map[string]kyber.Scalar, pval map[string]kyber.Point, - choice map[Predicate]int64) Prover { + choice map[Predicate]int32) Prover { return Prover(func(ctx ProverContext) error { return prf.prove(p, sval, pval, choice, ctx) diff --git a/proof/proof_test.go b/proof/proof_test.go index 2be7e2074..ab98e2e76 100644 --- a/proof/proof_test.go +++ b/proof/proof_test.go @@ -24,7 +24,7 @@ func TestRep(t *testing.T) { Y := suite.Point().Mul(y, X) R := suite.Point().Add(X, Y) - choice := make(map[Predicate]int64) + choice := make(map[Predicate]int32) // Simple single-secret predicate: prove X=x*B log := Rep("X", "x", "B") @@ -212,7 +212,7 @@ func Example_or2() { // We'll need to tell the prover which Or clause is actually true. // In this case clause 0, the first sub-predicate, is true: // i.e., we know a secret x such that X=x*B. - choice := make(map[Predicate]int64) + choice := make(map[Predicate]int32) choice[pred] = 0 // Generate a proof that we know the discrete logarithm of X or Y. 
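Note: a minimal sketch of how a caller selects the proof-obligated branch after this change; branch choices are int32 and the predicate API is as in the hunks above. The variable names (B, X, Y, x) and the v4 import path are illustrative assumptions.

package main

import (
	"go.dedis.ch/kyber/v4"                    // import path assumed for the v4 line
	"go.dedis.ch/kyber/v4/group/edwards25519"
	"go.dedis.ch/kyber/v4/proof"
)

func main() {
	suite := edwards25519.NewBlakeSHA256Ed25519()
	rng := suite.RandomStream()

	// One known discrete log X = x*B; Y is an unrelated public point.
	B := suite.Point().Base()
	x := suite.Scalar().Pick(rng)
	X := suite.Point().Mul(x, nil)
	Y := suite.Point().Pick(rng)

	// OR-predicate: prove knowledge of log(X) or log(Y); only branch 0 is true.
	pred := proof.Or(proof.Rep("X", "x", "B"), proof.Rep("Y", "y", "B"))

	sval := map[string]kyber.Scalar{"x": x}
	pval := map[string]kyber.Point{"B": B, "X": X, "Y": Y}
	choice := map[proof.Predicate]int32{pred: 0} // branch choices are int32 now

	prover := pred.Prover(suite, sval, pval, choice)
	verifier := pred.Verifier(suite, pval)
	_, _ = prover, verifier // driven by a ProverContext / VerifierContext elsewhere
}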
diff --git a/share/dkg/pedersen/dkg.go b/share/dkg/pedersen/dkg.go index 946b89ba8..e7fa4ca64 100644 --- a/share/dkg/pedersen/dkg.go +++ b/share/dkg/pedersen/dkg.go @@ -556,22 +556,22 @@ func (d *DistKeyGenerator) Certified() bool { // invalid // 2. if there are no response from a share holder, the share holder is // removed from the list. -func (d *DistKeyGenerator) QualifiedShares() []int { - var invalidSh = make(map[int]bool) - var invalidDeals = make(map[int]bool) +func (d *DistKeyGenerator) QualifiedShares() []uint32 { + var invalidSh = make(map[uint32]bool) + var invalidDeals = make(map[uint32]bool) // compute list of invalid deals according to 1. for dealerIndex, verifier := range d.verifiers { responses := verifier.Responses() if len(responses) == 0 { // don't analyzes "empty" deals - i.e. dealers that never sent // their deal in the first place. - invalidDeals[int(dealerIndex)] = true + invalidDeals[dealerIndex] = true } for holderIndex := range d.c.NewNodes { resp, ok := responses[uint32(holderIndex)] if ok && resp.Status == vss.StatusComplaint { // 1. rule - invalidDeals[int(dealerIndex)] = true + invalidDeals[dealerIndex] = true break } } @@ -580,7 +580,7 @@ func (d *DistKeyGenerator) QualifiedShares() []int { // compute list of invalid share holders for valid deals for dealerIndex, verifier := range d.verifiers { // skip analyze of invalid deals - if _, present := invalidDeals[int(dealerIndex)]; present { + if _, present := invalidDeals[dealerIndex]; present { continue } responses := verifier.Responses() @@ -588,17 +588,17 @@ func (d *DistKeyGenerator) QualifiedShares() []int { _, ok := responses[uint32(holderIndex)] if !ok { // 2. rule - absent response - invalidSh[holderIndex] = true + invalidSh[uint32(holderIndex)] = true } } } - var validHolders []int + var validHolders []uint32 for i := range d.c.NewNodes { - if _, included := invalidSh[i]; included { + if _, included := invalidSh[uint32(i)]; included { continue } - validHolders = append(validHolders, i) + validHolders = append(validHolders, uint32(i)) } return validHolders } @@ -734,7 +734,7 @@ func (d *DistKeyGenerator) resharingKey() (*DistKeyShare, error) { deal := v.Deal() coeffs[i] = deal.Commitments // share of dist. secret. Invertion of rows/column - deal.SecShare.I = uint32(i) + deal.SecShare.I = i shares[i] = deal.SecShare return true }) @@ -768,7 +768,7 @@ func (d *DistKeyGenerator) resharingKey() (*DistKeyShare, error) { // using the old threshold / length because there are at most // len(d.c.OldNodes) i-th coefficients since they are the one generating one // each, thus using the old threshold. 
- coeff, err := share.RecoverCommit(d.suite, tmpCoeffs, int(d.oldT), len(d.c.OldNodes)) + coeff, err := share.RecoverCommit(d.suite, tmpCoeffs, d.oldT, uint32(len(d.c.OldNodes))) if err != nil { return nil, err } diff --git a/share/dkg/pedersen/dkg_test.go b/share/dkg/pedersen/dkg_test.go index 1d58a2e7c..5135d3ba9 100644 --- a/share/dkg/pedersen/dkg_test.go +++ b/share/dkg/pedersen/dkg_test.go @@ -75,7 +75,7 @@ func TestDKGDeal(t *testing.T) { deals, err := dkg.Deals() require.Nil(t, err) - require.Len(t, deals, int(defaultN)-1) + require.Len(t, deals, int(defaultN-1)) for i := range deals { require.NotNil(t, deals[i]) @@ -95,8 +95,8 @@ func TestDKGProcessDeal(t *testing.T) { rec := dkgs[1] deal := deals[1] - require.Equal(t, int(deal.Index), 0) - require.Equal(t, 1, rec.nidx) + require.Equal(t, deal.Index, uint32(0)) + require.Equal(t, uint32(1), rec.nidx) // verifier don't find itself goodP := rec.c.NewNodes @@ -374,7 +374,7 @@ func TestDKGResharingThreshold(t *testing.T) { if !dkg.newPresent { continue } - require.Contains(t, qualShares, int(dkg2.nidx)) + require.Contains(t, qualShares, dkg2.nidx) } } @@ -495,12 +495,12 @@ func TestDKGThreshold(t *testing.T) { } for _, dkg := range thrDKGs { - require.Equal(t, newTotal, len(dkg.QUAL())) + require.Equal(t, newTotal, uint32(len(dkg.QUAL()))) require.True(t, dkg.ThresholdCertified()) require.False(t, dkg.Certified()) qualShares := dkg.QualifiedShares() for _, dkg2 := range thrDKGs { - require.Contains(t, qualShares, int(dkg2.nidx)) + require.Contains(t, qualShares, dkg2.nidx) } _, err := dkg.DistKeyShare() require.NoError(t, err) @@ -676,7 +676,7 @@ func TestDKGResharingRemoveNode(t *testing.T) { publics, secrets, dkgs := generate(defaultN, oldT) fullExchange(t, dkgs, true) - newN := uint32(len(publics)) - 1 + newN := uint32(len(publics) - 1) shares := make([]*DistKeyShare, len(dkgs)) sshares := make([]*share.PriShare, len(dkgs)) for i, dkg := range dkgs { @@ -875,7 +875,7 @@ func TestDKGResharingNewNodesThreshold(t *testing.T) { // 3. make sure everyone has the same QUAL set for _, dkg := range newDkgs { - require.Equal(t, alive, len(dkg.QUAL())) + require.Equal(t, alive, uint32(len(dkg.QUAL()))) for _, dkg2 := range oldSelected { require.True(t, dkg.isInQUAL(uint32(dkg2.oidx)), "new dkg %d has not in qual old dkg %d (qual = %v)", dkg.nidx, dkg2.oidx, dkg.QUAL()) } @@ -965,7 +965,7 @@ func TestDKGResharingNewNodes(t *testing.T) { require.True(t, oldDkgs[i].canIssue) require.True(t, oldDkgs[i].isResharing) require.False(t, oldDkgs[i].newPresent) - require.Equal(t, 0, oldDkgs[i].nidx) // default for nidx + require.Equal(t, uint32(0), oldDkgs[i].nidx) // default for nidx require.Equal(t, oldDkgs[i].oidx, i) } @@ -1192,8 +1192,8 @@ func TestDKGResharingPartialNewNodes(t *testing.T) { } newDkgs := totalDkgs[1:] oldDkgs := totalDkgs[:oldN] - require.Equal(t, oldN, len(oldDkgs)) - require.Equal(t, newN, len(newDkgs)) + require.Equal(t, oldN, uint32(len(oldDkgs))) + require.Equal(t, newN, uint32(len(newDkgs))) // full secret sharing exchange // 1. broadcast deals @@ -1232,7 +1232,7 @@ func TestDKGResharingPartialNewNodes(t *testing.T) { // all new dkgs should have the same length of verifiers map for _, dkg := range newDkgs { // one deal per old participants - require.Equal(t, oldN, len(dkg.verifiers), "dkg nidx %d failing", dkg.nidx) + require.Equal(t, oldN, uint32(len(dkg.verifiers)), "dkg nidx %d failing", dkg.nidx) } // 2. 
Broadcast responses @@ -1255,14 +1255,14 @@ func TestDKGResharingPartialNewNodes(t *testing.T) { } for _, dkg := range newDkgs { for i := uint32(0); i < oldN; i++ { - require.True(t, dkg.verifiers[uint32(i)].DealCertified(), "new dkg %d has not certified deal %d => %v", dkg.nidx, i, dkg.verifiers[uint32(i)].Responses()) + require.True(t, dkg.verifiers[i].DealCertified(), "new dkg %d has not certified deal %d => %v", dkg.nidx, i, dkg.verifiers[i].Responses()) } } // 3. make sure everyone has the same QUAL set for _, dkg := range newDkgs { for _, dkg2 := range oldDkgs { - require.True(t, dkg.isInQUAL(uint32(dkg2.oidx)), "new dkg %d has not in qual old dkg %d (qual = %v)", dkg.nidx, dkg2.oidx, dkg.QUAL()) + require.True(t, dkg.isInQUAL(dkg2.oidx), "new dkg %d has not in qual old dkg %d (qual = %v)", dkg.nidx, dkg2.oidx, dkg.QUAL()) } } diff --git a/share/dkg/rabin/dkg_test.go b/share/dkg/rabin/dkg_test.go index 5a5ecb976..bbf25ddf8 100644 --- a/share/dkg/rabin/dkg_test.go +++ b/share/dkg/rabin/dkg_test.go @@ -15,7 +15,7 @@ import ( var suite = edwards25519.NewBlakeSHA256Ed25519() -var nbParticipants = 7 +var nbParticipants = uint32(7) var partPubs []kyber.Point var partSec []kyber.Scalar @@ -25,7 +25,7 @@ var dkgs []*DistKeyGenerator func init() { partPubs = make([]kyber.Point, nbParticipants) partSec = make([]kyber.Scalar, nbParticipants) - for i := 0; i < nbParticipants; i++ { + for i := uint32(0); i < nbParticipants; i++ { sec, pub := genPair() partPubs[i] = pub partSec[i] = sec @@ -58,7 +58,7 @@ func TestDKGDeal(t *testing.T) { deals, err := dkg.Deals() require.Nil(t, err) - assert.Len(t, deals, nbParticipants-1) + assert.Len(t, deals, int(nbParticipants-1)) for i := range deals { assert.NotNil(t, deals[i]) @@ -585,8 +585,8 @@ func TestDistKeyShare(t *testing.T) { require.Nil(t, sc) require.Nil(t, err) - require.Equal(t, nbParticipants, len(dkg.QUAL())) - require.Equal(t, nbParticipants, len(dkg.commitments)) + require.Equal(t, nbParticipants, uint32(len(dkg.QUAL()))) + require.Equal(t, nbParticipants, uint32(len(dkg.commitments))) } // missing one commitment @@ -626,7 +626,7 @@ func TestDistKeyShare(t *testing.T) { func dkgGen() []*DistKeyGenerator { dkgs := make([]*DistKeyGenerator, nbParticipants) - for i := 0; i < nbParticipants; i++ { + for i := uint32(0); i < nbParticipants; i++ { dkg, err := NewDistKeyGenerator(suite, partSec[i], partPubs, nbParticipants/2+1) if err != nil { panic(err) diff --git a/share/poly.go b/share/poly.go index 412745cb2..cc71199f6 100644 --- a/share/poly.go +++ b/share/poly.go @@ -27,7 +27,7 @@ var errorCoeffs = errors.New("different number of coefficients") // PriShare represents a private share. type PriShare struct { - I int32 // Index of the private share + I uint32 // Index of the private share V kyber.Scalar // Value of the private share } @@ -71,8 +71,8 @@ func CoefficientsToPriPoly(g kyber.Group, coeffs []kyber.Scalar) *PriPoly { } // Threshold returns the secret sharing threshold. -func (p *PriPoly) Threshold() int { - return len(p.coeffs) +func (p *PriPoly) Threshold() uint32 { + return uint32(len(p.coeffs)) } // Secret returns the shared secret p(0), i.e., the constant term of the polynomial. @@ -81,10 +81,10 @@ func (p *PriPoly) Secret() kyber.Scalar { } // Eval computes the private share v = p(i). 
-func (p *PriPoly) Eval(i int32) *PriShare { +func (p *PriPoly) Eval(i uint32) *PriShare { xi := p.g.Scalar().SetInt64(1 + int64(i)) v := p.g.Scalar().Zero() - for j := p.Threshold() - 1; j >= 0; j-- { + for j := int(p.Threshold()) - 1; j >= 0; j-- { v.Mul(v, xi) v.Add(v, p.coeffs[j]) } @@ -92,10 +92,10 @@ func (p *PriPoly) Eval(i int32) *PriShare { } // Shares creates a list of n private shares p(1),...,p(n). -func (p *PriPoly) Shares(n int) []*PriShare { +func (p *PriPoly) Shares(n uint32) []*PriShare { shares := make([]*PriShare, n) for i := range shares { - shares[i] = p.Eval(int32(i)) + shares[i] = p.Eval(uint32(i)) } return shares } @@ -128,7 +128,7 @@ func (p *PriPoly) Equal(q *PriPoly) bool { return false } b := 1 - for i := 0; i < p.Threshold(); i++ { + for i := uint32(0); i < p.Threshold(); i++ { pb, _ := p.coeffs[i].MarshalBinary() qb, _ := q.coeffs[i].MarshalBinary() b &= subtle.ConstantTimeCompare(pb, qb) @@ -214,7 +214,7 @@ func (s byIndexScalar) Less(i, j int) bool { return s[i].I < s[j].I } // xyScalar returns the list of (x_i, y_i) pairs indexed. The first map returned // is the list of x_i and the second map is the list of y_i, both indexed in // their respective map at index i. -func xyScalar(g kyber.Group, shares []*PriShare, t, n uint32) (map[int32]kyber.Scalar, map[int32]kyber.Scalar) { +func xyScalar(g kyber.Group, shares []*PriShare, t, n uint32) (map[uint32]kyber.Scalar, map[uint32]kyber.Scalar) { // we are sorting first the shares since the shares may be unrelated for // some applications. In this case, all participants needs to interpolate on // the exact same order shares. @@ -226,8 +226,8 @@ func xyScalar(g kyber.Group, shares []*PriShare, t, n uint32) (map[int32]kyber.S } sort.Sort(byIndexScalar(sorted)) - x := make(map[int32]kyber.Scalar) - y := make(map[int32]kyber.Scalar) + x := make(map[uint32]kyber.Scalar) + y := make(map[uint32]kyber.Scalar) for _, s := range sorted { if s == nil || s.V == nil || s.I < 0 { continue @@ -296,7 +296,7 @@ func (p *PriPoly) String() string { // PubShare represents a public share. type PubShare struct { - I int32 // Index of the public share + I uint32 // Index of the public share V kyber.Point // Value of the public share } @@ -326,8 +326,8 @@ func (p *PubPoly) Info() (base kyber.Point, commits []kyber.Point) { } // Threshold returns the secret sharing threshold. -func (p *PubPoly) Threshold() int { - return len(p.commits) +func (p *PubPoly) Threshold() uint32 { + return uint32(len(p.commits)) } // Commit returns the secret commitment p(0), i.e., the constant term of the polynomial. @@ -336,10 +336,10 @@ func (p *PubPoly) Commit() kyber.Point { } // Eval computes the public share v = p(i). -func (p *PubPoly) Eval(i int32) *PubShare { +func (p *PubPoly) Eval(i uint32) *PubShare { xi := p.g.Scalar().SetInt64(1 + int64(i)) // x-coordinate of this share v := p.g.Point().Null() - for j := p.Threshold() - 1; j >= 0; j-- { + for j := int(p.Threshold()) - 1; j >= 0; j-- { v.Mul(xi, v) v.Add(v, p.commits[j]) } @@ -347,10 +347,10 @@ func (p *PubPoly) Eval(i int32) *PubShare { } // Shares creates a list of n public commitment shares p(1),...,p(n). 
-func (p *PubPoly) Shares(n int) []*PubShare { +func (p *PubPoly) Shares(n uint32) []*PubShare { shares := make([]*PubShare, n) for i := range shares { - shares[i] = p.Eval(int32(i)) + shares[i] = p.Eval(uint32(i)) } return shares } @@ -387,7 +387,7 @@ func (p *PubPoly) Equal(q *PubPoly) bool { return false } b := 1 - for i := 0; i < p.Threshold(); i++ { + for i := uint32(0); i < p.Threshold(); i++ { pb, _ := p.commits[i].MarshalBinary() qb, _ := q.commits[i].MarshalBinary() b &= subtle.ConstantTimeCompare(pb, qb) @@ -409,7 +409,7 @@ func (s byIndexPub) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s byIndexPub) Less(i, j int) bool { return s[i].I < s[j].I } // xyCommits is the public version of xScalars. -func xyCommit(g kyber.Group, shares []*PubShare, t, n uint32) (map[int32]kyber.Scalar, map[int32]kyber.Point) { +func xyCommit(g kyber.Group, shares []*PubShare, t, n uint32) (map[uint32]kyber.Scalar, map[uint32]kyber.Point) { // we are sorting first the shares since the shares may be unrelated for // some applications. In this case, all participants needs to interpolate on // the exact same order shares. @@ -421,8 +421,8 @@ func xyCommit(g kyber.Group, shares []*PubShare, t, n uint32) (map[int32]kyber.S } sort.Sort(byIndexPub(sorted)) - x := make(map[int32]kyber.Scalar) - y := make(map[int32]kyber.Point) + x := make(map[uint32]kyber.Scalar) + y := make(map[uint32]kyber.Point) for _, s := range sorted { if s == nil || s.V == nil || s.I < 0 { @@ -481,7 +481,7 @@ func RecoverPubPoly(g kyber.Group, shares []*PubShare, t, n uint32) (*PubPoly, e var err error for j := range x { - basis := lagrangeBasis(g, int32(j), x) + basis := lagrangeBasis(g, j, x) // compute the L_j * y_j polynomial in point space tmp := basis.Commit(y[j]) @@ -504,7 +504,7 @@ func RecoverPubPoly(g kyber.Group, shares []*PubShare, t, n uint32) (*PubPoly, e // lagrangeBasis returns a PriPoly containing the Lagrange coefficients for the // i-th position. xs is a mapping between the indices and the values that the // interpolation is using, computed with xyScalar(). -func lagrangeBasis(g kyber.Group, i int32, xs map[int32]kyber.Scalar) *PriPoly { +func lagrangeBasis(g kyber.Group, i uint32, xs map[uint32]kyber.Scalar) *PriPoly { var basis = &PriPoly{ g: g, coeffs: []kyber.Scalar{g.Scalar().One()}, diff --git a/share/poly_test.go b/share/poly_test.go index 47ecc6563..6e9aa9ea2 100644 --- a/share/poly_test.go +++ b/share/poly_test.go @@ -11,7 +11,7 @@ import ( func TestSecretRecovery(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 poly := NewPriPoly(g, t, nil, g.RandomStream()) shares := poly.Shares(n) @@ -39,13 +39,13 @@ func TestSecretRecovery(test *testing.T) { // See TestPublicRecoveryOutIndex for testing with the commitment. 
func TestSecretRecoveryOutIndex(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 poly := NewPriPoly(g, t, nil, g.RandomStream()) shares := poly.Shares(n) selected := shares[n-t:] - require.Len(test, selected, t) + require.Len(test, selected, int(t)) newN := t + 1 recovered, err := RecoverSecret(g, selected, t, newN) @@ -60,7 +60,7 @@ func TestSecretRecoveryOutIndex(test *testing.T) { func TestSecretRecoveryDelete(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 poly := NewPriPoly(g, t, nil, g.RandomStream()) shares := poly.Shares(n) @@ -83,7 +83,7 @@ func TestSecretRecoveryDelete(test *testing.T) { func TestSecretRecoveryDeleteFail(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 poly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -104,7 +104,7 @@ func TestSecretRecoveryDeleteFail(test *testing.T) { func TestSecretPolyEqual(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 p1 := NewPriPoly(g, t, nil, g.RandomStream()) @@ -124,7 +124,7 @@ func TestSecretPolyEqual(test *testing.T) { func TestPublicCheck(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 priPoly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -140,7 +140,7 @@ func TestPublicCheck(test *testing.T) { func TestPublicRecovery(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 priPoly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -166,7 +166,7 @@ func TestPublicRecovery(test *testing.T) { func TestPublicRecoveryOutIndex(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 priPoly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -174,7 +174,7 @@ func TestPublicRecoveryOutIndex(test *testing.T) { pubShares := pubPoly.Shares(n) selected := pubShares[n-t:] - require.Len(test, selected, t) + require.Len(test, selected, int(t)) newN := t + 1 recovered, err := RecoverCommit(g, selected, t, newN) @@ -196,7 +196,7 @@ func TestPublicRecoveryOutIndex(test *testing.T) { func TestPublicRecoveryDelete(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 priPoly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -221,7 +221,7 @@ func TestPublicRecoveryDelete(test *testing.T) { func TestPublicRecoveryDeleteFail(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 priPoly := NewPriPoly(g, t, nil, g.RandomStream()) @@ -243,7 +243,7 @@ func TestPublicRecoveryDeleteFail(test *testing.T) { func TestPrivateAdd(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 p := NewPriPoly(g, t, nil, g.RandomStream()) @@ -265,7 +265,7 @@ func TestPrivateAdd(test *testing.T) { func TestPublicAdd(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 G := g.Point().Pick(g.RandomStream()) @@ -299,7 +299,7 @@ func TestPublicAdd(test *testing.T) { func TestPublicPolyEqual(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 G := g.Point().Pick(g.RandomStream()) @@ -325,7 +325,7 @@ func TestPublicPolyEqual(test *testing.T) { func TestPriPolyMul(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 a := NewPriPoly(suite, t, nil, 
suite.RandomStream()) b := NewPriPoly(suite, t, nil, suite.RandomStream()) @@ -352,7 +352,7 @@ func TestPriPolyMul(test *testing.T) { func TestRecoverPriPoly(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 a := NewPriPoly(suite, t, nil, suite.RandomStream()) @@ -368,20 +368,20 @@ func TestRecoverPriPoly(test *testing.T) { reverseRecovered, err := RecoverPriPoly(suite, reverses, t, n) assert.Nil(test, err) - for i := 0; i < t; i++ { - assert.Equal(test, recovered.Eval(int64(i)).V.String(), a.Eval(int64(i)).V.String()) - assert.Equal(test, reverseRecovered.Eval(int64(i)).V.String(), a.Eval(int64(i)).V.String()) + for i := uint32(0); i < t; i++ { + assert.Equal(test, recovered.Eval(i).V.String(), a.Eval(i).V.String()) + assert.Equal(test, reverseRecovered.Eval(i).V.String(), a.Eval(i).V.String()) } } func TestPriPolyCoefficients(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 a := NewPriPoly(suite, t, nil, suite.RandomStream()) coeffs := a.Coefficients() - require.Len(test, coeffs, t) + require.Len(test, coeffs, int(t)) b := CoefficientsToPriPoly(suite, coeffs) require.Equal(test, a.coeffs, b.coeffs) @@ -390,7 +390,7 @@ func TestPriPolyCoefficients(test *testing.T) { func TestRefreshDKG(test *testing.T) { g := edwards25519.NewBlakeSHA256Ed25519() - n := 10 + n := uint32(10) t := n/2 + 1 // Run an n-fold Pedersen VSS (= DKG) @@ -398,7 +398,7 @@ func TestRefreshDKG(test *testing.T) { priShares := make([][]*PriShare, n) pubPolys := make([]*PubPoly, n) pubShares := make([][]*PubShare, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { priPolys[i] = NewPriPoly(g, t, nil, g.RandomStream()) priShares[i] = priPolys[i].Shares(n) pubPolys[i] = priPolys[i].Commit(nil) @@ -406,8 +406,8 @@ func TestRefreshDKG(test *testing.T) { } // Verify VSS shares - for i := 0; i < n; i++ { - for j := 0; j < n; j++ { + for i := uint32(0); i < n; i++ { + for j := uint32(0); j < n; j++ { sij := priShares[i][j] // s_ij * G sijG := g.Point().Base().Mul(sij.V, nil) @@ -417,19 +417,19 @@ func TestRefreshDKG(test *testing.T) { // Create private DKG shares dkgShares := make([]*PriShare, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { acc := g.Scalar().Zero() - for j := 0; j < n; j++ { // assuming all participants are in the qualified set + for j := uint32(0); j < n; j++ { // assuming all participants are in the qualified set acc = g.Scalar().Add(acc, priShares[j][i].V) } - dkgShares[i] = &PriShare{int64(i), acc} + dkgShares[i] = &PriShare{i, acc} } // Create public DKG commitments (= verification vector) dkgCommits := make([]kyber.Point, t) - for k := 0; k < t; k++ { + for k := uint32(0); k < t; k++ { acc := g.Point().Null() - for i := 0; i < n; i++ { // assuming all participants are in the qualified set + for i := uint32(0); i < n; i++ { // assuming all participants are in the qualified set _, coeff := pubPolys[i].Info() acc = g.Point().Add(acc, coeff[k]) } @@ -438,7 +438,7 @@ func TestRefreshDKG(test *testing.T) { // Check that the private DKG shares verify against the public DKG commits dkgPubPoly := NewPubPoly(g, nil, dkgCommits) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { require.True(test, dkgPubPoly.Check(dkgShares[i])) } @@ -449,7 +449,7 @@ func TestRefreshDKG(test *testing.T) { subPubShares := make([][]*PubShare, n) // Create subshares and subpolys - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { subPriPolys[i] = NewPriPoly(g, t, 
dkgShares[i].V, g.RandomStream()) subPriShares[i] = subPriPolys[i].Shares(n) subPubPolys[i] = subPriPolys[i].Commit(nil) @@ -459,18 +459,18 @@ func TestRefreshDKG(test *testing.T) { // Handout shares to new nodes column-wise and verify them newDKGShares := make([]*PriShare, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { tmpPriShares := make([]*PriShare, n) // column-wise reshuffled sub-shares tmpPubShares := make([]*PubShare, n) // public commitments to old DKG private shares - for j := 0; j < n; j++ { + for j := uint32(0); j < n; j++ { // Check 1: Verify that the received individual private subshares s_ji // is correct by evaluating the public commitment vector - tmpPriShares[j] = &PriShare{I: int64(j), V: subPriShares[j][i].V} // Shares that participant i gets from j - require.True(test, g.Point().Mul(tmpPriShares[j].V, nil).Equal(subPubPolys[j].Eval(int64(i)).V)) + tmpPriShares[j] = &PriShare{I: j, V: subPriShares[j][i].V} // Shares that participant i gets from j + require.True(test, g.Point().Mul(tmpPriShares[j].V, nil).Equal(subPubPolys[j].Eval(i).V)) // Check 2: Verify that the received sub public shares are // commitments to the original secret - tmpPubShares[j] = dkgPubPoly.Eval(int64(j)) + tmpPubShares[j] = dkgPubPoly.Eval(j) require.True(test, tmpPubShares[j].V.Equal(subPubPolys[j].Commit())) } // Check 3: Verify that the received public shares interpolate to the @@ -482,16 +482,16 @@ func TestRefreshDKG(test *testing.T) { // Compute the refreshed private DKG share of node i s, err := RecoverSecret(g, tmpPriShares, t, n) require.NoError(test, err) - newDKGShares[i] = &PriShare{I: int64(i), V: s} + newDKGShares[i] = &PriShare{I: i, V: s} } // Refresh the DKG commitments (= verification vector) newDKGCommits := make([]kyber.Point, t) - for i := 0; i < t; i++ { + for i := uint32(0); i < t; i++ { pubShares := make([]*PubShare, n) - for j := 0; j < n; j++ { + for j := uint32(0); j < n; j++ { _, c := subPubPolys[j].Info() - pubShares[j] = &PubShare{I: int64(j), V: c[i]} + pubShares[j] = &PubShare{I: j, V: c[i]} } com, err := RecoverCommit(g, pubShares, t, n) require.NoError(test, err) @@ -502,13 +502,13 @@ func TestRefreshDKG(test *testing.T) { require.True(test, dkgCommits[0].Equal(newDKGCommits[0])) // Check that the old and new DKG private shares are different - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { require.False(test, dkgShares[i].V.Equal(newDKGShares[i].V)) } // Check that the refreshed private DKG shares verify against the refreshed public DKG commits q := NewPubPoly(g, nil, newDKGCommits) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { require.True(test, q.Check(newDKGShares[i])) } diff --git a/share/pvss/pvss.go b/share/pvss/pvss.go index 4ef31648f..c8b9778c6 100644 --- a/share/pvss/pvss.go +++ b/share/pvss/pvss.go @@ -47,7 +47,7 @@ type PubVerShare struct { // t and the base point H. The function returns the list of shares and the // public commitment polynomial. func EncShares(suite Suite, H kyber.Point, X []kyber.Point, secret kyber.Scalar, t uint32) (shares []*PubVerShare, commit *share.PubPoly, err error) { - n := len(X) + n := uint32(len(X)) encShares := make([]*PubVerShare, n) // Create secret sharing polynomial @@ -60,10 +60,10 @@ func EncShares(suite Suite, H kyber.Point, X []kyber.Point, secret kyber.Scalar, pubPoly := priPoly.Commit(H) // Prepare data for encryption consistency proofs ... 
- indices := make([]int32, n) + indices := make([]uint32, n) values := make([]kyber.Scalar, n) HS := make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { indices[i] = priShares[i].I values[i] = priShares[i].V HS[i] = H @@ -75,7 +75,7 @@ func EncShares(suite Suite, H kyber.Point, X []kyber.Point, secret kyber.Scalar, return nil, nil, err } - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { ps := &share.PubShare{I: indices[i], V: sX[i]} encShares[i] = &PubVerShare{*ps, *proofs[i]} } diff --git a/share/pvss/pvss_test.go b/share/pvss/pvss_test.go index e9dfe32b5..5c38eab24 100644 --- a/share/pvss/pvss_test.go +++ b/share/pvss/pvss_test.go @@ -12,11 +12,11 @@ func TestPVSS(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() G := suite.Point().Base() H := suite.Point().Pick(suite.XOF([]byte("H"))) - n := 10 + n := uint32(10) t := 2*n/3 + 1 x := make([]kyber.Scalar, n) // trustee private keys X := make([]kyber.Point, n) // trustee public keys - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { x[i] = suite.Scalar().Pick(suite.RandomStream()) X[i] = suite.Point().Mul(x[i], nil) } @@ -30,7 +30,7 @@ func TestPVSS(test *testing.T) { // (2) Share decryption (trustees) sH := make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { sH[i] = pubPoly.Eval(encShares[i].S.I).V } @@ -38,7 +38,7 @@ func TestPVSS(test *testing.T) { var E []*PubVerShare // good encrypted shares var D []*PubVerShare // good decrypted shares - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { if ds, err := DecShare(suite, H, X[i], sH[i], x[i], encShares[i]); err == nil { K = append(K, X[i]) E = append(E, encShares[i]) @@ -56,11 +56,11 @@ func TestPVSSDelete(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() G := suite.Point().Base() H := suite.Point().Pick(suite.XOF([]byte("H"))) - n := 10 + n := uint32(10) t := 2*n/3 + 1 x := make([]kyber.Scalar, n) // trustee private keys X := make([]kyber.Point, n) // trustee public keys - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { x[i] = suite.Scalar().Pick(suite.RandomStream()) X[i] = suite.Point().Mul(x[i], nil) } @@ -78,7 +78,7 @@ func TestPVSSDelete(test *testing.T) { // (2) Share decryption (trustees) sH := make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { sH[i] = pubPoly.Eval(encShares[i].S.I).V } @@ -86,7 +86,7 @@ func TestPVSSDelete(test *testing.T) { var E []*PubVerShare // good encrypted shares var D []*PubVerShare // good decrypted shares - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { if ds, err := DecShare(suite, H, X[i], sH[i], x[i], encShares[i]); err == nil { K = append(K, X[i]) E = append(E, encShares[i]) @@ -107,11 +107,11 @@ func TestPVSSDeleteFail(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() G := suite.Point().Base() H := suite.Point().Pick(suite.XOF([]byte("H"))) - n := 10 + n := uint32(10) t := 2*n/3 + 1 x := make([]kyber.Scalar, n) // trustee private keys X := make([]kyber.Point, n) // trustee public keys - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { x[i] = suite.Scalar().Pick(suite.RandomStream()) X[i] = suite.Point().Mul(x[i], nil) } @@ -129,7 +129,7 @@ func TestPVSSDeleteFail(test *testing.T) { // (2) Share decryption (trustees) sH := make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { sH[i] = pubPoly.Eval(encShares[i].S.I).V } @@ -137,7 +137,7 @@ func TestPVSSDeleteFail(test *testing.T) { var E []*PubVerShare // good 
encrypted shares var D []*PubVerShare // good decrypted shares - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { if ds, err := DecShare(suite, H, X[i], sH[i], x[i], encShares[i]); err == nil { K = append(K, X[i]) E = append(E, encShares[i]) @@ -158,11 +158,11 @@ func TestPVSSBatch(test *testing.T) { suite := edwards25519.NewBlakeSHA256Ed25519() G := suite.Point().Base() H := suite.Point().Pick(suite.XOF([]byte("H"))) - n := 5 + n := uint32(5) t := 2*n/3 + 1 x := make([]kyber.Scalar, n) // trustee private keys X := make([]kyber.Point, n) // trustee public keys - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { x[i] = suite.Scalar().Pick(suite.RandomStream()) X[i] = suite.Point().Mul(x[i], nil) } @@ -183,7 +183,7 @@ func TestPVSSBatch(test *testing.T) { sH0 := make([]kyber.Point, n) sH1 := make([]kyber.Point, n) sH2 := make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { sH0[i] = p0.Eval(e0[i].S.I).V sH1[i] = p1.Eval(e1[i].S.I).V sH2[i] = p2.Eval(e2[i].S.I).V diff --git a/share/vss/pedersen/vss.go b/share/vss/pedersen/vss.go index 36de99c7c..5cc5839f1 100644 --- a/share/vss/pedersen/vss.go +++ b/share/vss/pedersen/vss.go @@ -129,7 +129,7 @@ func NewDealer(suite Suite, longterm, secret kyber.Scalar, verifiers []kyber.Poi } d.t = t - f := share.NewPriPoly(d.suite, t, d.secret, suite.RandomStream()) + f := share.NewPriPoly(d.suite, d.t, d.secret, suite.RandomStream()) d.pub = d.suite.Point().Mul(d.long, nil) // Compute public polynomial coefficients @@ -137,16 +137,16 @@ func NewDealer(suite Suite, longterm, secret kyber.Scalar, verifiers []kyber.Poi _, d.secretCommits = F.Info() var err error - d.sessionID, err = sessionID(d.suite, d.pub, d.verifiers, d.secretCommits, t) + d.sessionID, err = sessionID(d.suite, d.pub, d.verifiers, d.secretCommits, d.t) if err != nil { return nil, err } - d.Aggregator = newAggregator(d.suite, d.pub, d.verifiers, d.secretCommits, t, d.sessionID) + d.Aggregator = newAggregator(d.suite, d.pub, d.verifiers, d.secretCommits, d.t, d.sessionID) // C = F + G d.deals = make([]*Deal, len(d.verifiers)) for i := range d.verifiers { - fi := f.Eval(int32(i)) + fi := f.Eval(uint32(i)) d.deals[i] = &Deal{ SessionID: d.sessionID, SecShare: fi, @@ -301,7 +301,7 @@ type Verifier struct { longterm kyber.Scalar pub kyber.Point dealer kyber.Point - index int32 + index uint32 verifiers []kyber.Point hkdfContext []byte *Aggregator @@ -320,11 +320,11 @@ func NewVerifier(suite Suite, longterm kyber.Scalar, dealerKey kyber.Point, pub := suite.Point().Mul(longterm, nil) var ok bool - var index int32 + var index uint32 for i, v := range verifiers { if v.Equal(pub) { ok = true - index = int32(i) + index = uint32(i) break } } @@ -465,7 +465,7 @@ func (v *Verifier) Key() (kyber.Scalar, kyber.Point) { // Index returns the index of the verifier in the list of participants used // during this run of the protocol. 
-func (v *Verifier) Index() int32 { +func (v *Verifier) Index() uint32 { return v.index } @@ -580,7 +580,7 @@ func (a *Aggregator) VerifyDeal(d *Deal, inclusion bool) error { } fi := d.SecShare - if fi.I < 0 || fi.I >= int32(len(a.verifiers)) { + if fi.I < 0 || fi.I >= uint32(len(a.verifiers)) { return errors.New("vss: index out of bounds in Deal") } // compute fi * G @@ -695,7 +695,7 @@ func (a *Aggregator) DealCertified() bool { } } enoughApprovals := approvals >= a.t - tooMuchAbsents := absentVerifiers > uint32(len(a.verifiers))-a.t + tooMuchAbsents := a.t > uint32(len(a.verifiers)) || absentVerifiers > uint32(len(a.verifiers))-a.t baseCondition := !a.badDealer && enoughApprovals && !isComplaint if a.timeout { return baseCondition && !tooMuchAbsents @@ -724,7 +724,7 @@ func MinimumT(n uint32) uint32 { } func validT(t uint32, verifiers []kyber.Point) bool { - return t >= 2 && t <= uint32(len(verifiers)) && uint32(t) == t + return t >= 2 && t <= uint32(len(verifiers)) } func deriveH(suite Suite, verifiers []kyber.Point) kyber.Point { diff --git a/share/vss/pedersen/vss_test.go b/share/vss/pedersen/vss_test.go index 57b1da319..82183b47c 100644 --- a/share/vss/pedersen/vss_test.go +++ b/share/vss/pedersen/vss_test.go @@ -17,9 +17,9 @@ var rng = blake2xb.New(nil) var suite = edwards25519.NewBlakeSHA256Ed25519WithRand(rng) -var nbVerifiers = 7 +var nbVerifiers = uint32(7) -var vssThreshold int +var vssThreshold uint32 var verifiersPub []kyber.Point var verifiersSec []kyber.Scalar @@ -92,7 +92,7 @@ func TestVSSDealerNew(t *testing.T) { require.NoError(t, err) require.NotNil(t, dealer.secretPoly) - for _, badT := range []int{0, 1, -4} { + for _, badT := range []uint32{0, 1} { _, err = NewDealer(suite, dealerSec, secret, verifiersPub, badT) assert.Error(t, err) } @@ -100,7 +100,7 @@ func TestVSSDealerNew(t *testing.T) { } func TestVSSVerifierNew(t *testing.T) { - randIdx := int64(rand.Int() % len(verifiersPub)) + randIdx := uint32(rand.Int() % len(verifiersPub)) v, err := NewVerifier(suite, verifiersSec[randIdx], dealerPub, verifiersPub) assert.NoError(t, err) assert.Equal(t, randIdx, v.index) @@ -123,13 +123,13 @@ func TestVSSShare(t *testing.T) { aggr := ver.Aggregator - for i := int64(1); i < aggr.t-1; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := uint32(1); i < aggr.t-1; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } // not enough approvals assert.Nil(t, ver.Deal()) - aggr.responses[uint32(aggr.t)] = &Response{Status: StatusApproval} + aggr.responses[aggr.t] = &Response{Status: StatusApproval} // Timeout all other (i>t) verifiers ver.SetTimeout() @@ -147,8 +147,8 @@ func TestVSSAggregatorDealCertified(t *testing.T) { dealer := genDealer() aggr := dealer.Aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } // Mark remaining verifiers as timed-out @@ -166,8 +166,8 @@ func TestVSSAggregatorDealCertified(t *testing.T) { // inconsistent state on purpose // too much complaints - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusComplaint} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Status: StatusComplaint} } assert.False(t, aggr.DealCertified()) } @@ -225,10 +225,10 @@ func TestVSSVerifierReceiveDeal(t *testing.T) { require.NotNil(t, resp) assert.Equal(t, StatusApproval, resp.Status) assert.Nil(t, err) - assert.Equal(t, 
v.index, int64(resp.Index)) + assert.Equal(t, v.index, resp.Index) assert.Equal(t, dealer.sid, resp.SessionID) assert.Nil(t, schnorr.Verify(suite, v.pub, resp.Hash(suite), resp.Signature)) - assert.Equal(t, v.responses[uint32(v.index)], resp) + assert.Equal(t, v.responses[v.index], resp) // wrong encryption goodSig := encD.Signature @@ -240,7 +240,7 @@ func TestVSSVerifierReceiveDeal(t *testing.T) { // wrong index goodIdx := d.SecShare.I - d.SecShare.I = (goodIdx - 1) % int64(nbVerifiers) + d.SecShare.I = (goodIdx - 1) % nbVerifiers encD, _ = dealer.EncryptedDeal(0) resp, err = v.ProcessEncryptedDeal(encD) assert.Error(t, err) @@ -406,13 +406,13 @@ func TestVSSAggregatorAllResponses(t *testing.T) { dealer := genDealer() aggr := dealer.Aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } assert.False(t, aggr.DealCertified()) - for i := aggr.t; i < int64(nbVerifiers); i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := aggr.t; i < nbVerifiers; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } assert.True(t, aggr.DealCertified()) @@ -423,8 +423,8 @@ func TestVSSDealerTimeout(t *testing.T) { dealer := genDealer() aggr := dealer.Aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } require.False(t, aggr.DealCertified()) @@ -452,8 +452,8 @@ func TestVSSVerifierTimeout(t *testing.T) { aggr := v.Aggregator // Add t responses - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Status: StatusApproval} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Status: StatusApproval} } assert.False(t, aggr.DealCertified()) @@ -501,9 +501,7 @@ func TestVSSAggregatorVerifyDeal(t *testing.T) { deal.SecShare.I = goodI // index not in bounds - deal.SecShare.I = -1 - assert.Error(t, aggr.VerifyDeal(deal, false)) - deal.SecShare.I = int64(len(verifiersPub)) + deal.SecShare.I = uint32(len(verifiersPub)) assert.Error(t, aggr.VerifyDeal(deal, false)) // shares invalid in respect to the commitments @@ -534,16 +532,16 @@ func TestVSSAggregatorAddComplaint(t *testing.T) { func TestVSSSessionID(t *testing.T) { dealer, _ := NewDealer(suite, dealerSec, secret, verifiersPub, vssThreshold) commitments := dealer.deals[0].Commitments - sid, err := sessionID(suite, dealerPub, verifiersPub, commitments, int(dealer.t)) + sid, err := sessionID(suite, dealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err) - sid2, err2 := sessionID(suite, dealerPub, verifiersPub, commitments, int(dealer.t)) + sid2, err2 := sessionID(suite, dealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err2) assert.Equal(t, sid, sid2) wrongDealerPub := suite.Point().Add(dealerPub, dealerPub) - sid3, err3 := sessionID(suite, wrongDealerPub, verifiersPub, commitments, int(dealer.t)) + sid3, err3 := sessionID(suite, wrongDealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err3) assert.NotEqual(t, sid3, sid2) } @@ -576,10 +574,10 @@ func genPair() (kyber.Scalar, kyber.Point) { return secret, public } -func genCommits(n int) ([]kyber.Scalar, []kyber.Point) { +func genCommits(n uint32) ([]kyber.Scalar, []kyber.Point) { var secrets = make([]kyber.Scalar, n) var publics = make([]kyber.Point, n) - for i := 0; i < n; i++ { + for 
i := uint32(0); i < n; i++ { secrets[i], publics[i] = genPair() } return secrets, publics @@ -593,7 +591,7 @@ func genDealer() *Dealer { func genAll() (*Dealer, []*Verifier) { dealer := genDealer() var verifiers = make([]*Verifier, nbVerifiers) - for i := 0; i < nbVerifiers; i++ { + for i := uint32(0); i < nbVerifiers; i++ { v, _ := NewVerifier(suite, verifiersSec[i], dealerPub, verifiersPub) verifiers[i] = v } diff --git a/share/vss/rabin/vss.go b/share/vss/rabin/vss.go index 997df19b4..39f220d6a 100644 --- a/share/vss/rabin/vss.go +++ b/share/vss/rabin/vss.go @@ -148,8 +148,8 @@ func NewDealer(suite Suite, longterm, secret kyber.Scalar, verifiers []kyber.Poi d.t = t H := deriveH(d.suite, d.verifiers) - f := share.NewPriPoly(d.suite, t, d.secret, suite.RandomStream()) - g := share.NewPriPoly(d.suite, t, nil, suite.RandomStream()) + f := share.NewPriPoly(d.suite, d.t, d.secret, suite.RandomStream()) + g := share.NewPriPoly(d.suite, d.t, nil, suite.RandomStream()) d.pub = d.suite.Point().Mul(d.long, nil) // Compute public polynomial coefficients @@ -163,23 +163,23 @@ func NewDealer(suite Suite, longterm, secret kyber.Scalar, verifiers []kyber.Poi } _, commitments := C.Info() - d.sessionID, err = sessionID(d.suite, d.pub, d.verifiers, commitments, t) + d.sessionID, err = sessionID(d.suite, d.pub, d.verifiers, commitments, d.t) if err != nil { return nil, err } - d.aggregator = newAggregator(d.suite, d.pub, d.verifiers, commitments, t, d.sessionID) + d.aggregator = newAggregator(d.suite, d.pub, d.verifiers, commitments, d.t, d.sessionID) // C = F + G d.deals = make([]*Deal, len(d.verifiers)) for i := range d.verifiers { - fi := f.Eval(int32(i)) - gi := g.Eval(int32(i)) + fi := f.Eval(uint32(i)) + gi := g.Eval(uint32(i)) d.deals[i] = &Deal{ SessionID: d.sessionID, SecShare: fi, RndShare: gi, Commitments: commitments, - T: uint32(d.t), + T: d.t, } } d.hkdfContext = context(suite, d.pub, verifiers) @@ -322,7 +322,7 @@ type Verifier struct { longterm kyber.Scalar pub kyber.Point dealer kyber.Point - index int32 + index uint32 verifiers []kyber.Point hkdfContext []byte *aggregator @@ -341,11 +341,11 @@ func NewVerifier(suite Suite, longterm kyber.Scalar, dealerKey kyber.Point, pub := suite.Point().Mul(longterm, nil) var ok bool - var index int32 + var index uint32 for i, v := range verifiers { if v.Equal(pub) { ok = true - index = int32(i) + index = uint32(i) break } } @@ -474,7 +474,7 @@ func (v *Verifier) Key() (kyber.Scalar, kyber.Point) { // Index returns the index of the verifier in the list of participants used // during this run of the protocol. -func (v *Verifier) Index() int32 { +func (v *Verifier) Index() uint32 { return v.index } @@ -564,7 +564,7 @@ func (a *aggregator) VerifyDeal(d *Deal, inclusion bool) error { if fi.I != gi.I { return errors.New("vss: not the same index for f and g share in Deal") } - if fi.I < 0 || fi.I >= int32(len(a.verifiers)) { + if fi.I < 0 || fi.I >= uint32(len(a.verifiers)) { return errors.New("vss: index out of bounds in Deal") } // compute fi * G + gi * H @@ -695,12 +695,12 @@ func (a *aggregator) UnsafeSetResponseDKG(idx uint32, approval bool) { // T should be adjusted to your threat model. Setting a lower T decreases the // difficulty for an adversary to break secrecy. However, a too large T makes // it possible for an adversary to prevent recovery (robustness). 
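// As a worked example (illustrative only, not part of this patch): with
// n = 7 verifiers, MinimumT returns (7+1)/2 = 4, and validT below then
// accepts any threshold t with 2 <= t <= 7.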
-func MinimumT(n int) int { +func MinimumT(n uint32) uint32 { return (n + 1) / 2 } func validT(t uint32, verifiers []kyber.Point) bool { - return t >= 2 && t <= uint32(len(verifiers)) && uint32(t) == t + return t >= 2 && t <= uint32(len(verifiers)) } func deriveH(suite Suite, verifiers []kyber.Point) kyber.Point { diff --git a/share/vss/rabin/vss_test.go b/share/vss/rabin/vss_test.go index 33c179c1b..0c77f7395 100644 --- a/share/vss/rabin/vss_test.go +++ b/share/vss/rabin/vss_test.go @@ -14,9 +14,9 @@ import ( var suite = edwards25519.NewBlakeSHA256Ed25519() -var nbVerifiers = 7 +var nbVerifiers = uint32(7) -var vssThreshold int +var vssThreshold uint32 var verifiersPub []kyber.Point var verifiersSec []kyber.Scalar @@ -83,14 +83,14 @@ func TestVSSDealerNew(t *testing.T) { _, err := NewDealer(suite, dealerSec, secret, verifiersPub, goodT) assert.NoError(t, err) - for _, badT := range []int{0, 1, -4} { + for _, badT := range []uint32{0, 1} { _, err = NewDealer(suite, dealerSec, secret, verifiersPub, badT) assert.Error(t, err) } } func TestVSSVerifierNew(t *testing.T) { - randIdx := int64(rand.Int() % len(verifiersPub)) + randIdx := uint32(rand.Int() % len(verifiersPub)) v, err := NewVerifier(suite, verifiersSec[randIdx], dealerPub, verifiersPub) assert.NoError(t, err) assert.Equal(t, randIdx, v.index) @@ -113,15 +113,15 @@ func TestVSSShare(t *testing.T) { aggr := ver.aggregator - for i := int64(1); i < aggr.t-1; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(1); i < aggr.t-1; i++ { + aggr.responses[i] = &Response{Approved: true} } ver.SetTimeout() // not enough approvals assert.Nil(t, ver.Deal()) - aggr.responses[uint32(aggr.t)] = &Response{Approved: true} + aggr.responses[aggr.t] = &Response{Approved: true} // deal not certified aggr.badDealer = true assert.Nil(t, ver.Deal()) @@ -135,8 +135,8 @@ func TestVSSAggregatorEnoughApprovals(t *testing.T) { dealer := genDealer() aggr := dealer.aggregator // just below - for i := int64(0); i < aggr.t-1; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(0); i < aggr.t-1; i++ { + aggr.responses[i] = &Response{Approved: true} } dealer.SetTimeout() @@ -144,11 +144,11 @@ func TestVSSAggregatorEnoughApprovals(t *testing.T) { assert.False(t, aggr.EnoughApprovals()) assert.Nil(t, dealer.SecretCommit()) - aggr.responses[uint32(aggr.t)] = &Response{Approved: true} + aggr.responses[aggr.t] = &Response{Approved: true} assert.True(t, aggr.EnoughApprovals()) - for i := aggr.t + 1; i < int64(nbVerifiers); i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := aggr.t + 1; i < nbVerifiers; i++ { + aggr.responses[i] = &Response{Approved: true} } assert.True(t, aggr.EnoughApprovals()) assert.Equal(t, suite.Point().Mul(secret, nil), dealer.SecretCommit()) @@ -158,8 +158,8 @@ func TestVSSAggregatorDealCertified(t *testing.T) { dealer := genDealer() aggr := dealer.aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Approved: true} } dealer.SetTimeout() @@ -172,8 +172,8 @@ func TestVSSAggregatorDealCertified(t *testing.T) { assert.Nil(t, dealer.SecretCommit()) // inconsistent state on purpose // too much complaints - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Approved: false} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Approved: false} } assert.False(t, aggr.DealCertified()) } @@ -230,10 +230,10 @@ func 
TestVSSVerifierReceiveDeal(t *testing.T) { require.NotNil(t, resp) assert.Equal(t, true, resp.Approved) assert.Nil(t, err) - assert.Equal(t, v.index, int64(resp.Index)) + assert.Equal(t, v.index, resp.Index) assert.Equal(t, dealer.sid, resp.SessionID) assert.Nil(t, schnorr.Verify(suite, v.pub, resp.Hash(suite), resp.Signature)) - assert.Equal(t, v.responses[uint32(v.index)], resp) + assert.Equal(t, v.responses[v.index], resp) // wrong encryption goodSig := encD.Signature @@ -245,7 +245,7 @@ func TestVSSVerifierReceiveDeal(t *testing.T) { // wrong index goodIdx := d.SecShare.I - d.SecShare.I = (goodIdx - 1) % int64(nbVerifiers) + d.SecShare.I = (goodIdx - 1) % nbVerifiers encD, _ = dealer.EncryptedDeal(0) resp, err = v.ProcessEncryptedDeal(encD) assert.Error(t, err) @@ -443,9 +443,7 @@ func TestVSSAggregatorVerifyDeal(t *testing.T) { deal.RndShare.I = goodI // index not in bounds - deal.SecShare.I = -1 - assert.Error(t, aggr.VerifyDeal(deal, false)) - deal.SecShare.I = int64(len(verifiersPub)) + deal.SecShare.I = uint32(len(verifiersPub)) assert.Error(t, aggr.VerifyDeal(deal, false)) // shares invalid in respect to the commitments @@ -477,8 +475,8 @@ func TestVSSAggregatorCleanVerifiers(t *testing.T) { dealer := genDealer() aggr := dealer.aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Approved: true} } assert.True(t, aggr.EnoughApprovals()) @@ -493,8 +491,8 @@ func TestVSSDealerSetTimeout(t *testing.T) { dealer := genDealer() aggr := dealer.aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Approved: true} } assert.True(t, aggr.EnoughApprovals()) @@ -520,8 +518,8 @@ func TestVSSVerifierSetTimeout(t *testing.T) { aggr := ver.aggregator - for i := int64(0); i < aggr.t; i++ { - aggr.responses[uint32(i)] = &Response{Approved: true} + for i := uint32(0); i < aggr.t; i++ { + aggr.responses[i] = &Response{Approved: true} } assert.True(t, aggr.EnoughApprovals()) @@ -535,16 +533,16 @@ func TestVSSVerifierSetTimeout(t *testing.T) { func TestVSSSessionID(t *testing.T) { dealer, _ := NewDealer(suite, dealerSec, secret, verifiersPub, vssThreshold) commitments := dealer.deals[0].Commitments - sid, err := sessionID(suite, dealerPub, verifiersPub, commitments, int(dealer.t)) + sid, err := sessionID(suite, dealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err) - sid2, err2 := sessionID(suite, dealerPub, verifiersPub, commitments, int(dealer.t)) + sid2, err2 := sessionID(suite, dealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err2) assert.Equal(t, sid, sid2) wrongDealerPub := suite.Point().Add(dealerPub, dealerPub) - sid3, err3 := sessionID(suite, wrongDealerPub, verifiersPub, commitments, int(dealer.t)) + sid3, err3 := sessionID(suite, wrongDealerPub, verifiersPub, commitments, dealer.t) assert.NoError(t, err3) assert.NotEqual(t, sid3, sid2) } @@ -577,10 +575,10 @@ func genPair() (kyber.Scalar, kyber.Point) { return secret, public } -func genCommits(n int) ([]kyber.Scalar, []kyber.Point) { +func genCommits(n uint32) ([]kyber.Scalar, []kyber.Point) { var secrets = make([]kyber.Scalar, n) var publics = make([]kyber.Point, n) - for i := 0; i < n; i++ { + for i := uint32(0); i < n; i++ { secrets[i], publics[i] = genPair() } return secrets, publics @@ -594,7 +592,7 @@ func genDealer() *Dealer { func genAll() (*Dealer, 
[]*Verifier) { dealer := genDealer() var verifiers = make([]*Verifier, nbVerifiers) - for i := 0; i < nbVerifiers; i++ { + for i := uint32(0); i < nbVerifiers; i++ { v, _ := NewVerifier(suite, verifiersSec[i], dealerPub, verifiersPub) verifiers[i] = v } diff --git a/shuffle/biffle.go b/shuffle/biffle.go index 47ca6e95c..63b33bf33 100644 --- a/shuffle/biffle.go +++ b/shuffle/biffle.go @@ -53,7 +53,7 @@ func Biffle(suite Suite, G, H kyber.Point, // Pick the single-bit permutation. var buf [1]byte random.Bytes(buf[:], rand) - bit := int64(buf[0] & 1) + bit := int32(buf[0] & 1) // Pick a fresh ElGamal blinding factor for each pair var beta [2]kyber.Scalar @@ -62,7 +62,7 @@ func Biffle(suite Suite, G, H kyber.Point, } // Create the output pair vectors - for i := int64(0); i < 2; i++ { + for i := int32(0); i < 2; i++ { piI := i ^ bit Xbar[i] = suite.Point().Mul(beta[piI], G) Xbar[i].Add(Xbar[i], X[piI]) @@ -75,7 +75,7 @@ func Biffle(suite Suite, G, H kyber.Point, "beta0": beta[0], "beta1": beta[1]} points := bifflePoints(suite, G, H, X, Y, Xbar, Ybar) - choice := map[proof.Predicate]int64{or: bit} + choice := map[proof.Predicate]int32{or: bit} prover = or.Prover(suite, secrets, points, choice) return } diff --git a/shuffle/pair.go b/shuffle/pair.go index 213a1529a..9be69550f 100644 --- a/shuffle/pair.go +++ b/shuffle/pair.go @@ -88,7 +88,7 @@ type ega6 struct { // and compute the correctness proof. type PairShuffle struct { grp kyber.Group - k int64 + k int p1 ega1 v2 ega2 p3 ega3 @@ -108,7 +108,7 @@ func (ps *PairShuffle) Init(grp kyber.Group, k int) *PairShuffle { // Create a well-formed PairShuffleProof with arrays correctly sized. ps.grp = grp - ps.k = int64(k) + ps.k = k ps.p1.A = make([]kyber.Point, k) ps.p1.C = make([]kyber.Point, k) ps.p1.U = make([]kyber.Point, k) @@ -129,13 +129,13 @@ func (ps *PairShuffle) Prove( grp := ps.grp k := ps.k - if k != int64(len(pi)) || k != int64(len(beta)) { + if k != len(pi) || k != len(beta) { panic("mismatched vector lengths") } // Compute pi^-1 inverse permutation - piinv := make([]int64, k) - for i := int64(0); i < k; i++ { + piinv := make([]int, k) + for i := 0; i < k; i++ { piinv[pi[i]] = i } @@ -158,7 +158,7 @@ func (ps *PairShuffle) Prove( p1.Lambda2 = grp.Point().Null() XY := grp.Point() // scratch wu := grp.Scalar() // scratch - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { p1.A[i] = grp.Point().Mul(a[i], g) p1.C[i] = grp.Point().Mul(z.Mul(gamma, a[pi[i]]), g) p1.U[i] = grp.Point().Mul(u[i], g) @@ -179,7 +179,7 @@ func (ps *PairShuffle) Prove( return err } B := make([]kyber.Point, k) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { P := grp.Point().Mul(v2.Zrho[i], g) B[i] = P.Sub(P, p1.U[i]) } @@ -187,11 +187,11 @@ func (ps *PairShuffle) Prove( // P step 3 p3 := &ps.p3 b := make([]kyber.Scalar, k) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { b[i] = grp.Scalar().Sub(v2.Zrho[i], u[i]) } d := make([]kyber.Scalar, k) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { d[i] = grp.Scalar().Mul(gamma, b[pi[i]]) p3.D[i] = grp.Point().Mul(d[i], g) } @@ -208,15 +208,15 @@ func (ps *PairShuffle) Prove( // P step 5 p5 := &ps.p5 r := make([]kyber.Scalar, k) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { r[i] = grp.Scalar().Add(a[i], z.Mul(v4.Zlambda, b[i])) } s := make([]kyber.Scalar, k) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { s[i] = grp.Scalar().Mul(gamma, r[pi[i]]) } p5.Ztau = grp.Scalar().Neg(tau0) - for i := int64(0); i < k; i++ { + for i := 0; i < k; i++ { p5.Zsigma[i] 
= grp.Scalar().Add(w[i], b[pi[i]]) p5.Ztau.Add(p5.Ztau, z.Mul(b[i], beta[i])) } @@ -235,7 +235,7 @@ func (ps *PairShuffle) Verify( // Validate all vector lengths grp := ps.grp - k := int(ps.k) + k := ps.k if len(X) != k || len(Y) != k || len(Xbar) != k || len(Ybar) != k { panic("mismatched vector lengths") } diff --git a/sign/cosi/cosi.go b/sign/cosi/cosi.go index 41f59af19..cda36af3d 100644 --- a/sign/cosi/cosi.go +++ b/sign/cosi/cosi.go @@ -235,9 +235,9 @@ func Verify(suite Suite, publics []kyber.Point, message, sig []byte, policy Poli // and the number of participants. type ParticipationMask interface { // CountEnabled returns the number of participants - CountEnabled() int + CountEnabled() uint32 // CountTotal returns the number of candidates - CountTotal() int + CountTotal() uint32 } // Mask represents a cosigning participation bitmask. @@ -349,9 +349,9 @@ func (m *Mask) KeyEnabled(public kyber.Point) (bool, error) { // CountEnabled returns the number of enabled nodes in the CoSi participation // mask. -func (m *Mask) CountEnabled() int { +func (m *Mask) CountEnabled() uint32 { // hw is hamming weight - hw := 0 + hw := uint32(0) for i := range m.publics { byt := i >> 3 msk := byte(1) << uint(i&7) @@ -363,8 +363,8 @@ func (m *Mask) CountEnabled() int { } // CountTotal returns the total number of nodes this CoSi instance knows. -func (m *Mask) CountTotal() int { - return len(m.publics) +func (m *Mask) CountTotal() uint32 { + return uint32(len(m.publics)) } // AggregateMasks computes the bitwise OR of the two given participation masks. @@ -411,18 +411,18 @@ func (p CompletePolicy) Check(m ParticipationMask) bool { // // Deprecated: the policy has moved to the package kyber/sign type ThresholdPolicy struct { - thold int64 + thold uint32 } // NewThresholdPolicy returns a new ThresholdPolicy with the given threshold. // // Deprecated: the policy has moved to the package kyber/sign -func NewThresholdPolicy(thold int) *ThresholdPolicy { - return &ThresholdPolicy{thold: int64(thold)} +func NewThresholdPolicy(thold uint32) *ThresholdPolicy { + return &ThresholdPolicy{thold: thold} } // Check verifies that at least a threshold number of participants have // contributed to a collective signature. 
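// Illustrative usage sketch (not part of this patch; mask is an assumed
// ParticipationMask value, e.g. a *Mask from this package):
//
//	p := NewThresholdPolicy(5)
//	ok := p.Check(mask) // true iff mask.CountEnabled() >= 5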
func (p ThresholdPolicy) Check(m ParticipationMask) bool { - return int64(m.CountEnabled()) >= p.thold + return m.CountEnabled() >= p.thold } diff --git a/sign/cosi/cosi_test.go b/sign/cosi/cosi_test.go index 437e11e98..15a325e1e 100644 --- a/sign/cosi/cosi_test.go +++ b/sign/cosi/cosi_test.go @@ -114,7 +114,7 @@ func testCoSi(t *testing.T, n, f int) { if f == 0 { p = nil } else { - p = NewThresholdPolicy(n - f) + p = NewThresholdPolicy(uint32(n - f)) } // send a short sig in, expect an error if err := Verify(testSuite, publics, message, sig[0:10], p); err == nil { @@ -170,7 +170,7 @@ func TestMask(t *testing.T) { t.Fatal(err) } - if masks[0].CountEnabled() != n { + if masks[0].CountEnabled() != uint32(n) { t.Fatal(errors.New("unexpected number of active indices")) } diff --git a/sign/dss/dss.go b/sign/dss/dss.go index a308fbc82..9093fc06f 100644 --- a/sign/dss/dss.go +++ b/sign/dss/dss.go @@ -46,7 +46,7 @@ type DSS struct { suite Suite secret kyber.Scalar public kyber.Point - index int32 + index uint32 participants []kyber.Point T uint32 long DistKeyShare @@ -55,7 +55,7 @@ type DSS struct { randomPoly *share.PubPoly msg []byte partials []*share.PriShare - partialsIdx map[int32]bool + partialsIdx map[uint32]bool signed bool sessionID []byte } @@ -76,12 +76,12 @@ type PartialSig struct { func NewDSS(suite Suite, secret kyber.Scalar, participants []kyber.Point, long, random DistKeyShare, msg []byte, T uint32) (*DSS, error) { public := suite.Point().Mul(secret, nil) - var i int32 + var i uint32 var found bool for j, p := range participants { if p.Equal(public) { found = true - i = int32(j) + i = uint32(j) break } } @@ -100,7 +100,7 @@ func NewDSS(suite Suite, secret kyber.Scalar, participants []kyber.Point, randomPoly: share.NewPubPoly(suite, suite.Point().Base(), random.Commitments()), msg: msg, T: T, - partialsIdx: make(map[int32]bool), + partialsIdx: make(map[uint32]bool), sessionID: sessionID(suite, long, random), }, nil } @@ -224,8 +224,8 @@ func (ps *PartialSig) Hash(s Suite) []byte { return h.Sum(nil) } -func findPub(list []kyber.Point, i int32) (kyber.Point, bool) { - if i >= int32(len(list)) { +func findPub(list []kyber.Point, i uint32) (kyber.Point, bool) { + if i >= uint32(len(list)) { return nil, false } return list[i], true diff --git a/sign/dss/dss_test.go b/sign/dss/dss_test.go index 4dc891a13..6aadf65ea 100644 --- a/sign/dss/dss_test.go +++ b/sign/dss/dss_test.go @@ -15,7 +15,7 @@ import ( var suite = edwards25519.NewBlakeSHA256Ed25519() -var nbParticipants = 7 +var nbParticipants = uint32(7) var t = nbParticipants/2 + 1 var partPubs []kyber.Point @@ -29,7 +29,7 @@ var dss []*DSS func init() { partPubs = make([]kyber.Point, nbParticipants) partSec = make([]kyber.Scalar, nbParticipants) - for i := 0; i < nbParticipants; i++ { + for i := uint32(0); i < nbParticipants; i++ { sec, pub := genPair() partPubs[i] = pub partSec[i] = sec @@ -98,7 +98,7 @@ func TestDSSPartialSigs(t *testing.T) { assert.Contains(t, err.Error(), "not enough") // enough partial sigs ? 
- for i := 2; i < nbParticipants; i++ { + for i := uint32(2); i < nbParticipants; i++ { dss := getDSS(i) ps, err := dss.PartialSig() require.Nil(t, err) @@ -110,7 +110,7 @@ func TestDSSPartialSigs(t *testing.T) { func TestDSSSignature(t *testing.T) { dsss := make([]*DSS, nbParticipants) pss := make([]*PartialSig, nbParticipants) - for i := 0; i < nbParticipants; i++ { + for i := uint32(0); i < nbParticipants; i++ { dsss[i] = getDSS(i) ps, err := dsss[i].PartialSig() require.Nil(t, err) @@ -135,7 +135,7 @@ func TestDSSSignature(t *testing.T) { assert.Nil(t, Verify(longterms[0].Public(), dss0.msg, buff)) } -func getDSS(i int) *DSS { +func getDSS(i uint32) *DSS { dss, err := NewDSS(suite, partSec[i], partPubs, longterms[i], randoms[i], []byte("hello"), t) if dss == nil || err != nil { panic("nil dss") @@ -145,7 +145,7 @@ func getDSS(i int) *DSS { func genDistSecret() []*dkg.DistKeyShare { dkgs := make([]*dkg.DistKeyGenerator, nbParticipants) - for i := 0; i < nbParticipants; i++ { + for i := uint32(0); i < nbParticipants; i++ { dkg, err := dkg.NewDistKeyGenerator(suite, partSec[i], partPubs, nbParticipants/2+1) if err != nil { panic(err) diff --git a/sign/policy.go b/sign/policy.go index 676760804..6fd930654 100644 --- a/sign/policy.go +++ b/sign/policy.go @@ -4,9 +4,9 @@ package sign // and the number of participants. type ParticipationMask interface { // CountEnabled returns the number of participants - CountEnabled() int + CountEnabled() uint32 // CountTotal returns the number of candidates - CountTotal() int + CountTotal() uint32 } // Policy represents a fully customizable cosigning policy deciding what @@ -35,16 +35,16 @@ func (p CompletePolicy) Check(m ParticipationMask) bool { // least the given threshold number of participants t have cosigned to make a // collective signature valid. type ThresholdPolicy struct { - thold int64 + thold uint32 } // NewThresholdPolicy returns a new ThresholdPolicy with the given threshold. -func NewThresholdPolicy(thold int) *ThresholdPolicy { - return &ThresholdPolicy{thold: int64(thold)} +func NewThresholdPolicy(thold uint32) *ThresholdPolicy { + return &ThresholdPolicy{thold: thold} } // Check verifies that at least a threshold number of participants have // contributed to a collective signature. func (p ThresholdPolicy) Check(m ParticipationMask) bool { - return int64(m.CountEnabled()) >= p.thold + return m.CountEnabled() >= p.thold } diff --git a/sign/policy_test.go b/sign/policy_test.go index faaf065fb..492676257 100644 --- a/sign/policy_test.go +++ b/sign/policy_test.go @@ -7,15 +7,15 @@ import ( ) type testMask struct { - numCandidates int - numParticipants int + numCandidates uint32 + numParticipants uint32 } -func (m testMask) CountTotal() int { +func (m testMask) CountTotal() uint32 { return m.numCandidates } -func (m testMask) CountEnabled() int { +func (m testMask) CountEnabled() uint32 { return m.numParticipants } diff --git a/sign/tbls/tbls.go b/sign/tbls/tbls.go index e2270b748..dd44a2f3f 100644 --- a/sign/tbls/tbls.go +++ b/sign/tbls/tbls.go @@ -25,14 +25,14 @@ import ( type SigShare []byte // Index returns the index i of the TBLS share Si. -func (s SigShare) Index() (int32, error) { +func (s SigShare) Index() (uint32, error) { var index uint16 buf := bytes.NewReader(s) err := binary.Read(buf, binary.BigEndian, &index) if err != nil { - return -1, err + return 0, err } - return int32(index), nil + return uint32(index), nil } // Value returns the value v of the TBLS share Si. 
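// Illustrative sketch of the Index method above (not part of this patch):
// a SigShare carries its index as a 2-byte big-endian prefix, which Index
// now returns as a uint32 instead of an int32. Assumes only the bytes and
// encoding/binary packages plus the tbls package shown here; the sample
// value bytes are made up.
//
//	var buf bytes.Buffer
//	_ = binary.Write(&buf, binary.BigEndian, uint16(3)) // index prefix i
//	buf.WriteString("placeholder-signature-value")      // value v (made up)
//	sig := tbls.SigShare(buf.Bytes())
//	idx, err := sig.Index() // idx == uint32(3), err == nil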
diff --git a/sign/tbls/tbls_test.go b/sign/tbls/tbls_test.go index 5254bb7f6..5672e869b 100644 --- a/sign/tbls/tbls_test.go +++ b/sign/tbls/tbls_test.go @@ -13,7 +13,7 @@ func TestTBLS(test *testing.T) { var err error msg := []byte("Hello threshold Boneh-Lynn-Shacham") suite := bn256.NewSuite() - n := 10 + n := uint32(10) t := n/2 + 1 secret := suite.G1().Scalar().Pick(suite.RandomStream()) priPoly := share.NewPriPoly(suite.G2(), t, secret, suite.RandomStream())