Diffstat (limited to 'vendor/filippo.io/edwards25519')
-rw-r--r--  vendor/filippo.io/edwards25519/LICENSE                 |   27
-rw-r--r--  vendor/filippo.io/edwards25519/README.md               |   14
-rw-r--r--  vendor/filippo.io/edwards25519/doc.go                  |   20
-rw-r--r--  vendor/filippo.io/edwards25519/edwards25519.go         |  427
-rw-r--r--  vendor/filippo.io/edwards25519/extra.go                |  349
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe.go             |  420
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64.go       |   16
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64.s        |  379
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go |   12
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64.go       |   16
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64.s        |   42
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go |   12
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_extra.go       |   50
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_generic.go     |  266
-rw-r--r--  vendor/filippo.io/edwards25519/scalar.go               |  343
-rw-r--r--  vendor/filippo.io/edwards25519/scalar_fiat.go          | 1147
-rw-r--r--  vendor/filippo.io/edwards25519/scalarmult.go           |  214
-rw-r--r--  vendor/filippo.io/edwards25519/tables.go               |  129
18 files changed, 3883 insertions(+), 0 deletions(-)
diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md
new file mode 100644
index 0000000..24e2457
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/README.md
@@ -0,0 +1,14 @@
+# filippo.io/edwards25519
+
+```
+import "filippo.io/edwards25519"
+```
+
+This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
+Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
+
+The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
+
+Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
+
+Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
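
A minimal usage sketch of the API added by this patch (not part of the vendored files); `NewGeneratorPoint`, `Add`, `Bytes`, and `Equal` are all defined in `edwards25519.go` below:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	B := edwards25519.NewGeneratorPoint()

	// Compute 2*B by point addition. The zero-value Point is only
	// valid as a receiver, so allocate with new and let Add set it.
	twoB := new(edwards25519.Point).Add(B, B)
	fmt.Printf("2*B = %x\n", twoB.Bytes())

	// Equal is constant time and returns 1 on equality, 0 otherwise.
	fmt.Println(twoB.Equal(B)) // 0
}
```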
diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go
new file mode 100644
index 0000000..ab6aaeb
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/doc.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+//	-x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use this package, which is an extended version of
+// crypto/internal/edwards25519 from the standard library repackaged as
+// an importable module.
+package edwards25519
diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go
new file mode 100644
index 0000000..a744da2
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/edwards25519.go
@@ -0,0 +1,427 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+	"errors"
+
+	"filippo.io/edwards25519/field"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+	X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+	X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+	// Make the type not comparable (i.e. unusable with == or as a map key), as
+	// equivalent points can be represented by different Go values.
+	_ incomparable
+
+	// The point is internally represented in extended coordinates (X, Y, Z, T)
+	// where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+	x, y, z, t field.Element
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+	for _, p := range points {
+		if p.x == (field.Element{}) && p.y == (field.Element{}) {
+			panic("edwards25519: use of uninitialized Point")
+		}
+	}
+}
+
+type projCached struct {
+	YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+	YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+	v.X.Zero()
+	v.Y.One()
+	v.Z.One()
+	return v
+}
+
+// identity is the identity element of the group, the point (0, 1).
+var identity, _ = new(Point).SetBytes([]byte{
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+	return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+	0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+	0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+	0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+	0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+	return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+	v.YplusX.One()
+	v.YminusX.One()
+	v.Z.One()
+	v.T2d.Zero()
+	return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+	v.YplusX.One()
+	v.YminusX.One()
+	v.T2d.Zero()
+	return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+	*v = *u
+	return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var buf [32]byte
+	return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+	checkInitialized(v)
+
+	var zInv, x, y field.Element
+	zInv.Invert(&v.z)       // zInv = 1 / Z
+	x.Multiply(&v.x, &zInv) // x = X / Z
+	y.Multiply(&v.y, &zInv) // y = Y / Z
+
+	out := copyFieldElement(buf, &y)
+	out[31] |= byte(x.IsNegative() << 7)
+	return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+	// Specifically, the non-canonical encodings that are accepted are
+	//   1) the ones where the field element is not reduced (see the
+	//      (*field.Element).SetBytes docs) and
+	//   2) the ones where the x-coordinate is zero and the sign bit is set.
+	//
+	// Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+	// specifically the "Canonical A, R" section.
+
+	y, err := new(field.Element).SetBytes(x)
+	if err != nil {
+		return nil, errors.New("edwards25519: invalid point encoding length")
+	}
+
+	// -x² + y² = 1 + dx²y²
+	// x² + dx²y² = x²(dy² + 1) = y² - 1
+	// x² = (y² - 1) / (dy² + 1)
+
+	// u = y² - 1
+	y2 := new(field.Element).Square(y)
+	u := new(field.Element).Subtract(y2, feOne)
+
+	// v = dy² + 1
+	vv := new(field.Element).Multiply(y2, d)
+	vv = vv.Add(vv, feOne)
+
+	// x = +√(u/v)
+	xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+	if wasSquare == 0 {
+		return nil, errors.New("edwards25519: invalid point encoding")
+	}
+
+	// Select the negative square root if the sign bit is set.
+	xxNeg := new(field.Element).Negate(xx)
+	xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+	v.x.Set(xx)
+	v.y.Set(y)
+	v.z.One()
+	v.t.Multiply(xx, y) // xy = T / Z
+
+	return v, nil
+}
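
A sketch of the SetBytes/Bytes round trip (assuming `bytes`, `fmt`, and the `filippo.io/edwards25519` import from this vendored copy):

```go
enc := edwards25519.NewGeneratorPoint().Bytes()

p, err := new(edwards25519.Point).SetBytes(enc)
if err != nil {
	panic(err) // enc is a valid, canonical encoding
}

// For canonical encodings, decoding then re-encoding is the identity.
fmt.Println(bytes.Equal(p.Bytes(), enc)) // true
```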
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+	copy(buf[:], v.Bytes())
+	return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+	v.X.Multiply(&p.X, &p.T)
+	v.Y.Multiply(&p.Y, &p.Z)
+	v.Z.Multiply(&p.Z, &p.T)
+	return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+	v.X.Set(&p.x)
+	v.Y.Set(&p.y)
+	v.Z.Set(&p.z)
+	return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+	v.x.Multiply(&p.X, &p.T)
+	v.y.Multiply(&p.Y, &p.Z)
+	v.z.Multiply(&p.Z, &p.T)
+	v.t.Multiply(&p.X, &p.Y)
+	return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+	v.x.Multiply(&p.X, &p.Z)
+	v.y.Multiply(&p.Y, &p.Z)
+	v.z.Square(&p.Z)
+	v.t.Multiply(&p.X, &p.Y)
+	return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+	0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+	0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+	0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+	0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+	v.YplusX.Add(&p.y, &p.x)
+	v.YminusX.Subtract(&p.y, &p.x)
+	v.Z.Set(&p.z)
+	v.T2d.Multiply(&p.t, d2)
+	return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+	v.YplusX.Add(&p.y, &p.x)
+	v.YminusX.Subtract(&p.y, &p.x)
+	v.T2d.Multiply(&p.t, d2)
+
+	var invZ field.Element
+	invZ.Invert(&p.z)
+	v.YplusX.Multiply(&v.YplusX, &invZ)
+	v.YminusX.Multiply(&v.YminusX, &invZ)
+	v.T2d.Multiply(&v.T2d, &invZ)
+	return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+	checkInitialized(p, q)
+	qCached := new(projCached).FromP3(q)
+	result := new(projP1xP1).Add(p, qCached)
+	return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+	checkInitialized(p, q)
+	qCached := new(projCached).FromP3(q)
+	result := new(projP1xP1).Sub(p, qCached)
+	return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+	var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+	YplusX.Add(&p.y, &p.x)
+	YminusX.Subtract(&p.y, &p.x)
+
+	PP.Multiply(&YplusX, &q.YplusX)
+	MM.Multiply(&YminusX, &q.YminusX)
+	TT2d.Multiply(&p.t, &q.T2d)
+	ZZ2.Multiply(&p.z, &q.Z)
+
+	ZZ2.Add(&ZZ2, &ZZ2)
+
+	v.X.Subtract(&PP, &MM)
+	v.Y.Add(&PP, &MM)
+	v.Z.Add(&ZZ2, &TT2d)
+	v.T.Subtract(&ZZ2, &TT2d)
+	return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+	var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+	YplusX.Add(&p.y, &p.x)
+	YminusX.Subtract(&p.y, &p.x)
+
+	PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+	MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+	TT2d.Multiply(&p.t, &q.T2d)
+	ZZ2.Multiply(&p.z, &q.Z)
+
+	ZZ2.Add(&ZZ2, &ZZ2)
+
+	v.X.Subtract(&PP, &MM)
+	v.Y.Add(&PP, &MM)
+	v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+	v.T.Add(&ZZ2, &TT2d)      // flipped sign
+	return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+	var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+	YplusX.Add(&p.y, &p.x)
+	YminusX.Subtract(&p.y, &p.x)
+
+	PP.Multiply(&YplusX, &q.YplusX)
+	MM.Multiply(&YminusX, &q.YminusX)
+	TT2d.Multiply(&p.t, &q.T2d)
+
+	Z2.Add(&p.z, &p.z)
+
+	v.X.Subtract(&PP, &MM)
+	v.Y.Add(&PP, &MM)
+	v.Z.Add(&Z2, &TT2d)
+	v.T.Subtract(&Z2, &TT2d)
+	return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+	var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+	YplusX.Add(&p.y, &p.x)
+	YminusX.Subtract(&p.y, &p.x)
+
+	PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+	MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+	TT2d.Multiply(&p.t, &q.T2d)
+
+	Z2.Add(&p.z, &p.z)
+
+	v.X.Subtract(&PP, &MM)
+	v.Y.Add(&PP, &MM)
+	v.Z.Subtract(&Z2, &TT2d) // flipped sign
+	v.T.Add(&Z2, &TT2d)      // flipped sign
+	return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+	var XX, YY, ZZ2, XplusYsq field.Element
+
+	XX.Square(&p.X)
+	YY.Square(&p.Y)
+	ZZ2.Square(&p.Z)
+	ZZ2.Add(&ZZ2, &ZZ2)
+	XplusYsq.Add(&p.X, &p.Y)
+	XplusYsq.Square(&XplusYsq)
+
+	v.Y.Add(&YY, &XX)
+	v.Z.Subtract(&YY, &XX)
+
+	v.X.Subtract(&XplusYsq, &v.Y)
+	v.T.Subtract(&ZZ2, &v.Z)
+	return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+	checkInitialized(p)
+	v.x.Negate(&p.x)
+	v.y.Set(&p.y)
+	v.z.Set(&p.z)
+	v.t.Negate(&p.t)
+	return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+	checkInitialized(v, u)
+
+	var t1, t2, t3, t4 field.Element
+	t1.Multiply(&v.x, &u.z)
+	t2.Multiply(&u.x, &v.z)
+	t3.Multiply(&v.y, &u.z)
+	t4.Multiply(&u.y, &v.z)
+
+	return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+	v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+	v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+	v.Z.Select(&a.Z, &b.Z, cond)
+	v.T2d.Select(&a.T2d, &b.T2d, cond)
+	return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+	v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+	v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+	v.T2d.Select(&a.T2d, &b.T2d, cond)
+	return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+	v.YplusX.Swap(&v.YminusX, cond)
+	v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+	return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+	v.YplusX.Swap(&v.YminusX, cond)
+	v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go
new file mode 100644
index 0000000..d152d68
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/extra.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/internal/edwards25519 package.
+
+import (
+	"errors"
+
+	"filippo.io/edwards25519/field"
+)
+
+// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap. Don't change the style without making
+	// sure it doesn't increase the inliner cost.
+	var e [4]field.Element
+	X, Y, Z, T = v.extendedCoordinates(&e)
+	return
+}
+
+func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
+	checkInitialized(v)
+	X = e[0].Set(&v.x)
+	Y = e[1].Set(&v.y)
+	Z = e[2].Set(&v.z)
+	T = e[3].Set(&v.t)
+	return
+}
+
+// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+//
+// If the coordinates are invalid or don't represent a valid point on the curve,
+// SetExtendedCoordinates returns nil and an error and the receiver is
+// unchanged. Otherwise, SetExtendedCoordinates returns v.
+func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
+	if !isOnCurve(X, Y, Z, T) {
+		return nil, errors.New("edwards25519: invalid point coordinates")
+	}
+	v.x.Set(X)
+	v.y.Set(Y)
+	v.z.Set(Z)
+	v.t.Set(T)
+	return v, nil
+}
+
+func isOnCurve(X, Y, Z, T *field.Element) bool {
+	var lhs, rhs field.Element
+	XX := new(field.Element).Square(X)
+	YY := new(field.Element).Square(Y)
+	ZZ := new(field.Element).Square(Z)
+	TT := new(field.Element).Square(T)
+	// -x² + y² = 1 + dx²y²
+	// -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
+	// -X² + Y² = Z² + dT²
+	lhs.Subtract(YY, XX)
+	rhs.Multiply(d, TT).Add(&rhs, ZZ)
+	if lhs.Equal(&rhs) != 1 {
+		return false
+	}
+	// xy = T/Z
+	// XY/Z² = T/Z
+	// XY = TZ
+	lhs.Multiply(X, Y)
+	rhs.Multiply(T, Z)
+	return lhs.Equal(&rhs) == 1
+}
+
+// BytesMontgomery converts v to a point on the birationally-equivalent
+// Curve25519 Montgomery curve, and returns its canonical 32-byte encoding
+// according to RFC 7748.
+//
+// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
+// to the same value. If v is the identity point, BytesMontgomery returns 32
+// zero bytes, analogously to the X25519 function.
+//
+// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
+// while every valid edwards25519 point has a unique u-coordinate Montgomery
+// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
+// to any edwards25519 point, and every other X25519 input corresponds to two
+// edwards25519 points.
+func (v *Point) BytesMontgomery() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var buf [32]byte
+	return v.bytesMontgomery(&buf)
+}
+
+func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
+	checkInitialized(v)
+
+	// RFC 7748, Section 4.1 provides the bilinear map to calculate the
+	// Montgomery u-coordinate
+	//
+	//              u = (1 + y) / (1 - y)
+	//
+	// where y = Y / Z.
+
+	var y, recip, u field.Element
+
+	y.Multiply(&v.y, y.Invert(&v.z))        // y = Y / Z
+	recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
+	u.Multiply(u.Add(feOne, &y), &recip)    // u = (1 + y)*r
+
+	return copyFieldElement(buf, &u)
+}
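
As a quick check of the bilinear map: the Ed25519 basepoint has y = 4/5, so u = (1 + y)/(1 - y) = 9, the X25519 basepoint. A sketch (assuming `fmt` and the package import):

```go
u := edwards25519.NewGeneratorPoint().BytesMontgomery()

// Little-endian u-coordinate: 9 followed by 31 zero bytes,
// matching the X25519 basepoint from RFC 7748.
fmt.Println(u[0] == 9) // true
```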
+
+// MultByCofactor sets v = 8 * p, and returns v.
+func (v *Point) MultByCofactor(p *Point) *Point {
+	checkInitialized(p)
+	result := projP1xP1{}
+	pp := (&projP2{}).FromP3(p)
+	result.Double(pp)
+	pp.FromP1xP1(&result)
+	result.Double(pp)
+	pp.FromP1xP1(&result)
+	result.Double(pp)
+	return v.fromP1xP1(&result)
+}
+
+// Given k > 0, set s = s^(2^k) by squaring k times.
+func (s *Scalar) pow2k(k int) {
+	for i := 0; i < k; i++ {
+		s.Multiply(s, s)
+	}
+}
+
+// Invert sets s to the inverse of a nonzero scalar t, and returns s.
+//
+// If t is zero, Invert returns zero.
+func (s *Scalar) Invert(t *Scalar) *Scalar {
+	// Uses a hardcoded sliding window of width 4.
+	var table [8]Scalar
+	var tt Scalar
+	tt.Multiply(t, t)
+	table[0] = *t
+	for i := 0; i < 7; i++ {
+		table[i+1].Multiply(&table[i], &tt)
+	}
+	// Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
+	// so t**k = table[k/2] for odd k
+
+	// To compute the sliding window digits, use the following Sage script:
+
+	// sage: import itertools
+	// sage: def sliding_window(w,k):
+	// ....:     digits = []
+	// ....:     while k > 0:
+	// ....:         if k % 2 == 1:
+	// ....:             kmod = k % (2**w)
+	// ....:             digits.append(kmod)
+	// ....:             k = k - kmod
+	// ....:         else:
+	// ....:             digits.append(0)
+	// ....:         k = k // 2
+	// ....:     return digits
+
+	// Now we can compute s roughly as follows:
+
+	// sage: s = 1
+	// sage: for coeff in reversed(sliding_window(4,l-2)):
+	// ....:     s = s*s
+	// ....:     if coeff > 0 :
+	// ....:         s = s*t**coeff
+
+	// This works on one bit at a time, with many runs of zeros.
+	// The digits can be collapsed into [(count, coeff)] as follows:
+
+	// sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
+
+	// Entries of the form (k, 0) turn into pow2k(k)
+	// Entries of the form (1, coeff) turn into a squaring and then a table lookup.
+	// We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
+
+	*s = table[1/2]
+	s.pow2k(127 + 1)
+	s.Multiply(s, &table[1/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[13/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[5/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[1/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(5 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(9 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[13/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[11/2])
+
+	return s
+}
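
A sketch of how Invert can be checked; `NewScalar`, `SetCanonicalBytes`, and `Equal` come from scalar.go, which is part of this diff but not shown above, and `input` is a hypothetical canonical 32-byte scalar encoding:

```go
var oneBytes [32]byte
oneBytes[0] = 1 // the scalar 1, little-endian
one, _ := edwards25519.NewScalar().SetCanonicalBytes(oneBytes[:])

t, _ := edwards25519.NewScalar().SetCanonicalBytes(input)
inv := edwards25519.NewScalar().Invert(t)

// t * t⁻¹ ≡ 1 (mod l) for nonzero t.
prod := edwards25519.NewScalar().Multiply(inv, t)
fmt.Println(prod.Equal(one)) // 1
```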
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+	if len(scalars) != len(points) {
+		panic("edwards25519: called MultiScalarMult with different size inputs")
+	}
+	checkInitialized(points...)
+
+	// Proceed as in the single-base case, but share doublings
+	// between each point in the multiscalar equation.
+
+	// Build lookup tables for each point
+	tables := make([]projLookupTable, len(points))
+	for i := range tables {
+		tables[i].FromP3(points[i])
+	}
+	// Compute signed radix-16 digits for each scalar
+	digits := make([][64]int8, len(scalars))
+	for i := range digits {
+		digits[i] = scalars[i].signedRadix16()
+	}
+
+	// Unwrap first loop iteration to save computing 16*identity
+	multiple := &projCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	// Lookup-and-add the appropriate multiple of each input point
+	for j := range tables {
+		tables[j].SelectInto(multiple, digits[j][63])
+		tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+		v.fromP1xP1(tmp1)     // update v
+	}
+	tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+	for i := 62; i >= 0; i-- {
+		tmp1.Double(tmp2)    // tmp1 =  2*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  2*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  4*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  4*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  8*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  8*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 = 16*(prev) in P1xP1 coords
+		v.fromP1xP1(tmp1)    //    v = 16*(prev) in P3 coords
+		// Lookup-and-add the appropriate multiple of each input point
+		for j := range tables {
+			tables[j].SelectInto(multiple, digits[j][i])
+			tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+			v.fromP1xP1(tmp1)     // update v
+		}
+		tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+	}
+	return v
+}
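
With equal scalars the multiscalar form reduces to repeated addition, which gives a cheap consistency check; a sketch reusing the scalar helpers from scalar.go (not shown above):

```go
B := edwards25519.NewGeneratorPoint()
var oneBytes [32]byte
oneBytes[0] = 1
one, _ := edwards25519.NewScalar().SetCanonicalBytes(oneBytes[:])

// 1*B + 1*B should equal B + B.
msm := new(edwards25519.Point).MultiScalarMult(
	[]*edwards25519.Scalar{one, one},
	[]*edwards25519.Point{B, B})
sum := new(edwards25519.Point).Add(B, B)
fmt.Println(msm.Equal(sum)) // 1
```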
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+	if len(scalars) != len(points) {
+		panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
+	}
+	checkInitialized(points...)
+
+	// Generalize double-base NAF computation to arbitrary sizes.
+	// Here all the points are dynamic, so we only use the smaller
+	// tables.
+
+	// Build lookup tables for each point
+	tables := make([]nafLookupTable5, len(points))
+	for i := range tables {
+		tables[i].FromP3(points[i])
+	}
+	// Compute a NAF for each scalar
+	nafs := make([][256]int8, len(scalars))
+	for i := range nafs {
+		nafs[i] = scalars[i].nonAdjacentForm(5)
+	}
+
+	multiple := &projCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	tmp2.Zero()
+
+	// Move from high to low bits, doubling the accumulator
+	// at each iteration and checking whether there is a nonzero
+	// coefficient to look up a multiple of.
+	//
+	// Skip trying to find the first nonzero coefficient, because
+	// searching might be more work than a few extra doublings.
+	for i := 255; i >= 0; i-- {
+		tmp1.Double(tmp2)
+
+		for j := range nafs {
+			if nafs[j][i] > 0 {
+				v.fromP1xP1(tmp1)
+				tables[j].SelectInto(multiple, nafs[j][i])
+				tmp1.Add(v, multiple)
+			} else if nafs[j][i] < 0 {
+				v.fromP1xP1(tmp1)
+				tables[j].SelectInto(multiple, -nafs[j][i])
+				tmp1.Sub(v, multiple)
+			}
+		}
+
+		tmp2.FromP1xP1(tmp1)
+	}
+
+	v.fromP2(tmp2)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
new file mode 100644
index 0000000..5518ef2
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+	"crypto/subtle"
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+	// An element t represents the integer
+	//     t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+	//
+	// Between operations, all limbs are expected to be lower than 2^52.
+	l0 uint64
+	l1 uint64
+	l2 uint64
+	l3 uint64
+	l4 uint64
+}
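
As an illustration of the representation, the limbs recompose into the represented integer as follows (a hypothetical helper using `math/big`, not part of the package):

```go
// toBig recomposes five 51-bit limbs, least significant first, into
// l0 + l1*2^51 + l2*2^102 + l3*2^153 + l4*2^204.
func toBig(limbs [5]uint64) *big.Int {
	v := new(big.Int)
	for i := 4; i >= 0; i-- {
		v.Lsh(v, 51) // make room for the next lower limb
		v.Add(v, new(big.Int).SetUint64(limbs[i]))
	}
	return v
}
```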
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+	*v = *feZero
+	return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+	*v = *feOne
+	return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+	v.carryPropagate()
+
+	// After the light reduction we now have a field element representation
+	// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+	// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+	// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+	c := (v.l0 + 19) >> 51
+	c = (v.l1 + c) >> 51
+	c = (v.l2 + c) >> 51
+	c = (v.l3 + c) >> 51
+	c = (v.l4 + c) >> 51
+
+	// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+	// effectively applying the reduction identity to the carry.
+	v.l0 += 19 * c
+
+	v.l1 += v.l0 >> 51
+	v.l0 = v.l0 & maskLow51Bits
+	v.l2 += v.l1 >> 51
+	v.l1 = v.l1 & maskLow51Bits
+	v.l3 += v.l2 >> 51
+	v.l2 = v.l2 & maskLow51Bits
+	v.l4 += v.l3 >> 51
+	v.l3 = v.l3 & maskLow51Bits
+	// no additional carry
+	v.l4 = v.l4 & maskLow51Bits
+
+	return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+	v.l0 = a.l0 + b.l0
+	v.l1 = a.l1 + b.l1
+	v.l2 = a.l2 + b.l2
+	v.l3 = a.l3 + b.l3
+	v.l4 = a.l4 + b.l4
+	// Using the generic implementation here is actually faster than the
+	// assembly. Probably because the body of this function is so simple that
+	// the compiler can figure out better optimizations by inlining the carry
+	// propagation.
+	return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+	// We first add 2 * p, to guarantee the subtraction won't underflow, and
+	// then subtract b (which can be up to 2^255 + 2^13 * 19).
+	v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+	v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+	v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+	v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+	v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+	return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+	return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+	// Inversion is implemented as exponentiation with exponent p − 2. It uses the
+	// same sequence of 255 squarings and 11 multiplications as [Curve25519].
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+	z2.Square(z)             // 2
+	t.Square(&z2)            // 4
+	t.Square(&t)             // 8
+	z9.Multiply(&t, z)       // 9
+	z11.Multiply(&z9, &z2)   // 11
+	t.Square(&z11)           // 22
+	z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+	t.Square(&z2_5_0) // 2^6 - 2^1
+	for i := 0; i < 4; i++ {
+		t.Square(&t) // 2^10 - 2^5
+	}
+	z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+	t.Square(&z2_10_0) // 2^11 - 2^1
+	for i := 0; i < 9; i++ {
+		t.Square(&t) // 2^20 - 2^10
+	}
+	z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+	t.Square(&z2_20_0) // 2^21 - 2^1
+	for i := 0; i < 19; i++ {
+		t.Square(&t) // 2^40 - 2^20
+	}
+	t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+	t.Square(&t) // 2^41 - 2^1
+	for i := 0; i < 9; i++ {
+		t.Square(&t) // 2^50 - 2^10
+	}
+	z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+	t.Square(&z2_50_0) // 2^51 - 2^1
+	for i := 0; i < 49; i++ {
+		t.Square(&t) // 2^100 - 2^50
+	}
+	z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+	t.Square(&z2_100_0) // 2^101 - 2^1
+	for i := 0; i < 99; i++ {
+		t.Square(&t) // 2^200 - 2^100
+	}
+	t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+	t.Square(&t) // 2^201 - 2^1
+	for i := 0; i < 49; i++ {
+		t.Square(&t) // 2^250 - 2^50
+	}
+	t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+	t.Square(&t) // 2^251 - 2^1
+	t.Square(&t) // 2^252 - 2^2
+	t.Square(&t) // 2^253 - 2^3
+	t.Square(&t) // 2^254 - 2^4
+	t.Square(&t) // 2^255 - 2^5
+
+	return v.Multiply(&t, &z11) // 2^255 - 21
+}
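
A sketch checking the inverse using only methods defined in this file (assuming `fmt` and the `field` package import):

```go
one := new(field.Element).One()
two := new(field.Element).Add(one, one)

inv := new(field.Element).Invert(two)

// two * two⁻¹ ≡ 1 (mod 2²⁵⁵ - 19).
check := new(field.Element).Multiply(two, inv)
fmt.Println(check.Equal(one)) // 1
```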
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+	*v = *a
+	return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+	if len(x) != 32 {
+		return nil, errors.New("edwards25519: invalid field element input size")
+	}
+
+	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+	v.l0 = binary.LittleEndian.Uint64(x[0:8])
+	v.l0 &= maskLow51Bits
+	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+	v.l1 &= maskLow51Bits
+	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+	v.l2 &= maskLow51Bits
+	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+	v.l3 &= maskLow51Bits
+	// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+	// Note: not bytes 25:33, shift 4, to avoid overread.
+	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+	v.l4 &= maskLow51Bits
+
+	return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var out [32]byte
+	return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+	t := *v
+	t.reduce()
+
+	var buf [8]byte
+	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+		bitsOffset := i * 51
+		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+		for i, bb := range buf {
+			off := bitsOffset/8 + i
+			if off >= len(out) {
+				break
+			}
+			out[off] |= bb
+		}
+	}
+
+	return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+	sa, sv := u.Bytes(), v.Bytes()
+	return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+	m := mask64Bits(cond)
+	v.l0 = (m & a.l0) | (^m & b.l0)
+	v.l1 = (m & a.l1) | (^m & b.l1)
+	v.l2 = (m & a.l2) | (^m & b.l2)
+	v.l3 = (m & a.l3) | (^m & b.l3)
+	v.l4 = (m & a.l4) | (^m & b.l4)
+	return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+	m := mask64Bits(cond)
+	t := m & (v.l0 ^ u.l0)
+	v.l0 ^= t
+	u.l0 ^= t
+	t = m & (v.l1 ^ u.l1)
+	v.l1 ^= t
+	u.l1 ^= t
+	t = m & (v.l2 ^ u.l2)
+	v.l2 ^= t
+	u.l2 ^= t
+	t = m & (v.l3 ^ u.l3)
+	v.l3 ^= t
+	u.l3 ^= t
+	t = m & (v.l4 ^ u.l4)
+	v.l4 ^= t
+	u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+	return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+	return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+	feMul(v, x, y)
+	return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+	feSquare(v, x)
+	return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+	x0lo, x0hi := mul51(x.l0, y)
+	x1lo, x1hi := mul51(x.l1, y)
+	x2lo, x2hi := mul51(x.l2, y)
+	x3lo, x3hi := mul51(x.l3, y)
+	x4lo, x4hi := mul51(x.l4, y)
+	v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+	v.l1 = x1lo + x0hi
+	v.l2 = x2lo + x1hi
+	v.l3 = x3lo + x2hi
+	v.l4 = x4lo + x3hi
+	// The hi portions are going to be only 32 bits, plus any previous excess,
+	// so we can skip the carry propagation.
+	return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+	mh, ml := bits.Mul64(a, uint64(b))
+	lo = ml & maskLow51Bits
+	hi = (mh << 13) | (ml >> 51)
+	return
+}
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+	var t0, t1, t2 Element
+
+	t0.Square(x)             // x^2
+	t1.Square(&t0)           // x^4
+	t1.Square(&t1)           // x^8
+	t1.Multiply(x, &t1)      // x^9
+	t0.Multiply(&t0, &t1)    // x^11
+	t0.Square(&t0)           // x^22
+	t0.Multiply(&t1, &t0)    // x^31
+	t1.Square(&t0)           // x^62
+	for i := 1; i < 5; i++ { // x^992
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // x^1023 -> 1023 = 2^10 - 1
+	t1.Square(&t0)            // 2^11 - 2
+	for i := 1; i < 10; i++ { // 2^20 - 2^10
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)     // 2^20 - 1
+	t2.Square(&t1)            // 2^21 - 2
+	for i := 1; i < 20; i++ { // 2^40 - 2^20
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^40 - 1
+	t1.Square(&t1)            // 2^41 - 2
+	for i := 1; i < 10; i++ { // 2^50 - 2^10
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^50 - 1
+	t1.Square(&t0)            // 2^51 - 2
+	for i := 1; i < 50; i++ { // 2^100 - 2^50
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)      // 2^100 - 1
+	t2.Square(&t1)             // 2^101 - 2
+	for i := 1; i < 100; i++ { // 2^200 - 2^100
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^200 - 1
+	t1.Square(&t1)            // 2^201 - 2
+	for i := 1; i < 50; i++ { // 2^250 - 2^50
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^250 - 1
+	t0.Square(&t0)            // 2^251 - 2
+	t0.Square(&t0)            // 2^252 - 4
+	return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+	2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+	t0 := new(Element)
+
+	// r = (u * v3) * (u * v7)^((p-5)/8)
+	v2 := new(Element).Square(v)
+	uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+	uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+	rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+	check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+	uNeg := new(Element).Negate(u)
+	correctSignSqrt := check.Equal(u)
+	flippedSignSqrt := check.Equal(uNeg)
+	flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+	rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+	// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+	rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+	r.Absolute(rr) // Choose the nonnegative square root.
+	return r, correctSignSqrt | flippedSignSqrt
+}
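
For instance, with u = 4 and v = 1 the ratio is a square and the non-negative root is 2; a sketch:

```go
one := new(field.Element).One()
two := new(field.Element).Add(one, one)
four := new(field.Element).Add(two, two)

r, wasSquare := new(field.Element).SqrtRatio(four, one)

// 4/1 is square; |2| is the non-negative root (its encoding has an
// even low bit, so IsNegative reports 0).
fmt.Println(wasSquare, r.Equal(two)) // 1 1
```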
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
new file mode 100644
index 0000000..edcf163
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go
@@ -0,0 +1,16 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
new file mode 100644
index 0000000..293f013
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s
@@ -0,0 +1,379 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+	MOVQ a+8(FP), CX
+	MOVQ b+16(FP), BX
+
+	// r0 = a0×b0
+	MOVQ (CX), AX
+	MULQ (BX)
+	MOVQ AX, DI
+	MOVQ DX, SI
+
+	// r0 += 19×a1×b4
+	MOVQ   8(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a2×b3
+	MOVQ   16(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a3×b2
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   16(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a4×b1
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   8(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r1 = a0×b1
+	MOVQ (CX), AX
+	MULQ 8(BX)
+	MOVQ AX, R9
+	MOVQ DX, R8
+
+	// r1 += a1×b0
+	MOVQ 8(CX), AX
+	MULQ (BX)
+	ADDQ AX, R9
+	ADCQ DX, R8
+
+	// r1 += 19×a2×b4
+	MOVQ   16(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r1 += 19×a3×b3
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r1 += 19×a4×b2
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   16(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r2 = a0×b2
+	MOVQ (CX), AX
+	MULQ 16(BX)
+	MOVQ AX, R11
+	MOVQ DX, R10
+
+	// r2 += a1×b1
+	MOVQ 8(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R11
+	ADCQ DX, R10
+
+	// r2 += a2×b0
+	MOVQ 16(CX), AX
+	MULQ (BX)
+	ADDQ AX, R11
+	ADCQ DX, R10
+
+	// r2 += 19×a3×b4
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R11
+	ADCQ   DX, R10
+
+	// r2 += 19×a4×b3
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, R11
+	ADCQ   DX, R10
+
+	// r3 = a0×b3
+	MOVQ (CX), AX
+	MULQ 24(BX)
+	MOVQ AX, R13
+	MOVQ DX, R12
+
+	// r3 += a1×b2
+	MOVQ 8(CX), AX
+	MULQ 16(BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += a2×b1
+	MOVQ 16(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += a3×b0
+	MOVQ 24(CX), AX
+	MULQ (BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += 19×a4×b4
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R13
+	ADCQ   DX, R12
+
+	// r4 = a0×b4
+	MOVQ (CX), AX
+	MULQ 32(BX)
+	MOVQ AX, R15
+	MOVQ DX, R14
+
+	// r4 += a1×b3
+	MOVQ 8(CX), AX
+	MULQ 24(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a2×b2
+	MOVQ 16(CX), AX
+	MULQ 16(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a3×b1
+	MOVQ 24(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a4×b0
+	MOVQ 32(CX), AX
+	MULQ (BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// First reduction chain
+	MOVQ   $0x0007ffffffffffff, AX
+	SHLQ   $0x0d, DI, SI
+	SHLQ   $0x0d, R9, R8
+	SHLQ   $0x0d, R11, R10
+	SHLQ   $0x0d, R13, R12
+	SHLQ   $0x0d, R15, R14
+	ANDQ   AX, DI
+	IMUL3Q $0x13, R14, R14
+	ADDQ   R14, DI
+	ANDQ   AX, R9
+	ADDQ   SI, R9
+	ANDQ   AX, R11
+	ADDQ   R8, R11
+	ANDQ   AX, R13
+	ADDQ   R10, R13
+	ANDQ   AX, R15
+	ADDQ   R12, R15
+
+	// Second reduction chain (carryPropagate)
+	MOVQ   DI, SI
+	SHRQ   $0x33, SI
+	MOVQ   R9, R8
+	SHRQ   $0x33, R8
+	MOVQ   R11, R10
+	SHRQ   $0x33, R10
+	MOVQ   R13, R12
+	SHRQ   $0x33, R12
+	MOVQ   R15, R14
+	SHRQ   $0x33, R14
+	ANDQ   AX, DI
+	IMUL3Q $0x13, R14, R14
+	ADDQ   R14, DI
+	ANDQ   AX, R9
+	ADDQ   SI, R9
+	ANDQ   AX, R11
+	ADDQ   R8, R11
+	ANDQ   AX, R13
+	ADDQ   R10, R13
+	ANDQ   AX, R15
+	ADDQ   R12, R15
+
+	// Store output
+	MOVQ out+0(FP), AX
+	MOVQ DI, (AX)
+	MOVQ R9, 8(AX)
+	MOVQ R11, 16(AX)
+	MOVQ R13, 24(AX)
+	MOVQ R15, 32(AX)
+	RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+	MOVQ a+8(FP), CX
+
+	// r0 = l0×l0
+	MOVQ (CX), AX
+	MULQ (CX)
+	MOVQ AX, SI
+	MOVQ DX, BX
+
+	// r0 += 38×l1×l4
+	MOVQ   8(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, SI
+	ADCQ   DX, BX
+
+	// r0 += 38×l2×l3
+	MOVQ   16(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, SI
+	ADCQ   DX, BX
+
+	// r1 = 2×l0×l1
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 8(CX)
+	MOVQ AX, R8
+	MOVQ DX, DI
+
+	// r1 += 38×l2×l4
+	MOVQ   16(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R8
+	ADCQ   DX, DI
+
+	// r1 += 19×l3×l3
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, R8
+	ADCQ   DX, DI
+
+	// r2 = 2×l0×l2
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 16(CX)
+	MOVQ AX, R10
+	MOVQ DX, R9
+
+	// r2 += l1×l1
+	MOVQ 8(CX), AX
+	MULQ 8(CX)
+	ADDQ AX, R10
+	ADCQ DX, R9
+
+	// r2 += 38×l3×l4
+	MOVQ   24(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R10
+	ADCQ   DX, R9
+
+	// r3 = 2×l0×l3
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 24(CX)
+	MOVQ AX, R12
+	MOVQ DX, R11
+
+	// r3 += 2×l1×l2
+	MOVQ   8(CX), AX
+	IMUL3Q $0x02, AX, AX
+	MULQ   16(CX)
+	ADDQ   AX, R12
+	ADCQ   DX, R11
+
+	// r3 += 19×l4×l4
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R12
+	ADCQ   DX, R11
+
+	// r4 = 2×l0×l4
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 32(CX)
+	MOVQ AX, R14
+	MOVQ DX, R13
+
+	// r4 += 2×l1×l3
+	MOVQ   8(CX), AX
+	IMUL3Q $0x02, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, R14
+	ADCQ   DX, R13
+
+	// r4 += l2×l2
+	MOVQ 16(CX), AX
+	MULQ 16(CX)
+	ADDQ AX, R14
+	ADCQ DX, R13
+
+	// First reduction chain
+	MOVQ   $0x0007ffffffffffff, AX
+	SHLQ   $0x0d, SI, BX
+	SHLQ   $0x0d, R8, DI
+	SHLQ   $0x0d, R10, R9
+	SHLQ   $0x0d, R12, R11
+	SHLQ   $0x0d, R14, R13
+	ANDQ   AX, SI
+	IMUL3Q $0x13, R13, R13
+	ADDQ   R13, SI
+	ANDQ   AX, R8
+	ADDQ   BX, R8
+	ANDQ   AX, R10
+	ADDQ   DI, R10
+	ANDQ   AX, R12
+	ADDQ   R9, R12
+	ANDQ   AX, R14
+	ADDQ   R11, R14
+
+	// Second reduction chain (carryPropagate)
+	MOVQ   SI, BX
+	SHRQ   $0x33, BX
+	MOVQ   R8, DI
+	SHRQ   $0x33, DI
+	MOVQ   R10, R9
+	SHRQ   $0x33, R9
+	MOVQ   R12, R11
+	SHRQ   $0x33, R11
+	MOVQ   R14, R13
+	SHRQ   $0x33, R13
+	ANDQ   AX, SI
+	IMUL3Q $0x13, R13, R13
+	ADDQ   R13, SI
+	ANDQ   AX, R8
+	ADDQ   BX, R8
+	ANDQ   AX, R10
+	ADDQ   DI, R10
+	ANDQ   AX, R12
+	ADDQ   R9, R12
+	ANDQ   AX, R14
+	ADDQ   R11, R14
+
+	// Store output
+	MOVQ out+0(FP), AX
+	MOVQ SI, (AX)
+	MOVQ R8, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R12, 24(AX)
+	MOVQ R14, 32(AX)
+	RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
new file mode 100644
index 0000000..ddb6c9b
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+// +build !amd64 !gc purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
new file mode 100644
index 0000000..af459ef
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+// +build arm64,gc,!purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+	carryPropagate(v)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
new file mode 100644
index 0000000..3126a43
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+	MOVD v+0(FP), R20
+
+	LDP 0(R20), (R0, R1)
+	LDP 16(R20), (R2, R3)
+	MOVD 32(R20), R4
+
+	AND $0x7ffffffffffff, R0, R10
+	AND $0x7ffffffffffff, R1, R11
+	AND $0x7ffffffffffff, R2, R12
+	AND $0x7ffffffffffff, R3, R13
+	AND $0x7ffffffffffff, R4, R14
+
+	ADD R0>>51, R11, R11
+	ADD R1>>51, R12, R12
+	ADD R2>>51, R13, R13
+	ADD R3>>51, R14, R14
+	// R4>>51 * 19 + R10 -> R10
+	LSR $51, R4, R21
+	MOVD $19, R22
+	MADD R22, R10, R21, R10
+
+	STP (R10, R11), 0(R20)
+	STP (R12, R13), 16(R20)
+	MOVD R14, 32(R20)
+
+	RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
new file mode 100644
index 0000000..234a5b2
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+// +build !arm64 !gc purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+	return v.carryPropagateGeneric()
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
new file mode 100644
index 0000000..1ef503b
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+	if len(x) != 64 {
+		return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+	}
+
+	// Split the 64 bytes into two elements, and extract the most significant
+	// bit of each, which is ignored by SetBytes.
+	lo, _ := new(Element).SetBytes(x[:32])
+	loMSB := uint64(x[31] >> 7)
+	hi, _ := new(Element).SetBytes(x[32:])
+	hiMSB := uint64(x[63] >> 7)
+
+	// The output we want is
+	//
+	//   v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+	//
+	// which applying the reduction identity comes out to
+	//
+	//   v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+	//
+	// l0 will be the sum of a 51-bit value (lo.l0), plus a 5-bit value
+	// (loMSB * 19), a 57-bit value (hi.l0 * 2 * 19), and a 10-bit value
+	// (hiMSB * 2 * 19²), so it fits in a uint64.
+
+	v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+	v.l1 = lo.l1 + hi.l1*2*19
+	v.l2 = lo.l2 + hi.l2*2*19
+	v.l3 = lo.l3 + hi.l3*2*19
+	v.l4 = lo.l4 + hi.l4*2*19
+
+	return v.carryPropagate(), nil
+}
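
A sketch of the intended use, reducing a 64-byte hash output into an Element (assuming `crypto/sha512`, `fmt`, and the `field` import):

```go
h := sha512.Sum512([]byte("some input"))

fe, err := new(field.Element).SetWideBytes(h[:])
if err != nil {
	panic(err) // h[:] is exactly 64 bytes
}
fmt.Printf("%x\n", fe.Bytes())
```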
diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
new file mode 100644
index 0000000..86f5fd9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+	lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	lo, c := bits.Add64(lo, v.lo, 0)
+	hi, _ = bits.Add64(hi, v.hi, c)
+	return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+	return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+	a0 := a.l0
+	a1 := a.l1
+	a2 := a.l2
+	a3 := a.l3
+	a4 := a.l4
+
+	b0 := b.l0
+	b1 := b.l1
+	b2 := b.l2
+	b3 := b.l3
+	b4 := b.l4
+
+	// Limb multiplication works like pen-and-paper columnar multiplication, but
+	// with 51-bit limbs instead of digits.
+	//
+	//                          a4   a3   a2   a1   a0  x
+	//                          b4   b3   b2   b1   b0  =
+	//                         ------------------------
+	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
+	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
+	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
+	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
+	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
+	//   ----------------------------------------------
+	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
+	//
+	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+	//
+	// Reduction can be carried out simultaneously to multiplication. For
+	// example, we do not compute r5: whenever the result of a multiplication
+	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+	//
+	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
+	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
+	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
+	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
+	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
+	//           --------------------------------------
+	//              r4      r3      r2      r1      r0
+	//
+	// Finally we add up the columns into wide, overlapping limbs.
+
+	a1_19 := a1 * 19
+	a2_19 := a2 * 19
+	a3_19 := a3 * 19
+	a4_19 := a4 * 19
+
+	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+	r0 := mul64(a0, b0)
+	r0 = addMul64(r0, a1_19, b4)
+	r0 = addMul64(r0, a2_19, b3)
+	r0 = addMul64(r0, a3_19, b2)
+	r0 = addMul64(r0, a4_19, b1)
+
+	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+	r1 := mul64(a0, b1)
+	r1 = addMul64(r1, a1, b0)
+	r1 = addMul64(r1, a2_19, b4)
+	r1 = addMul64(r1, a3_19, b3)
+	r1 = addMul64(r1, a4_19, b2)
+
+	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+	r2 := mul64(a0, b2)
+	r2 = addMul64(r2, a1, b1)
+	r2 = addMul64(r2, a2, b0)
+	r2 = addMul64(r2, a3_19, b4)
+	r2 = addMul64(r2, a4_19, b3)
+
+	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+	r3 := mul64(a0, b3)
+	r3 = addMul64(r3, a1, b2)
+	r3 = addMul64(r3, a2, b1)
+	r3 = addMul64(r3, a3, b0)
+	r3 = addMul64(r3, a4_19, b4)
+
+	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+	r4 := mul64(a0, b4)
+	r4 = addMul64(r4, a1, b3)
+	r4 = addMul64(r4, a2, b2)
+	r4 = addMul64(r4, a3, b1)
+	r4 = addMul64(r4, a4, b0)
+
+	// After the multiplication, we need to reduce (carry) the five coefficients
+	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+	// to respect the Element invariant.
+	//
+	// Overall, the reduction works the same as carryPropagate, except with
+	// wider inputs: we take the carry for each coefficient by shifting it right
+	// by 51, and add it to the limb above it. The top carry is multiplied by 19
+	// according to the reduction identity and added to the lowest limb.
+	//
+	// The largest coefficient (r0) will be at most 111 bits, which guarantees
+	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+	//
+	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+	//     r0 < 2⁷ × 2⁵² × 2⁵²
+	//     r0 < 2¹¹¹
+	//
+	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+	// allows us to easily apply the reduction identity.
+	//
+	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+	//     r4 < 5 × 2⁵² × 2⁵²
+	//     r4 < 2¹⁰⁷
+	//
+
+	c0 := shiftRightBy51(r0)
+	c1 := shiftRightBy51(r1)
+	c2 := shiftRightBy51(r2)
+	c3 := shiftRightBy51(r3)
+	c4 := shiftRightBy51(r4)
+
+	rr0 := r0.lo&maskLow51Bits + c4*19
+	rr1 := r1.lo&maskLow51Bits + c0
+	rr2 := r2.lo&maskLow51Bits + c1
+	rr3 := r3.lo&maskLow51Bits + c2
+	rr4 := r4.lo&maskLow51Bits + c3
+
+	// Now all coefficients fit into 64-bit registers but are still too large to
+	// be passed around as an Element. We therefore do one last carry chain,
+	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+	*v = Element{rr0, rr1, rr2, rr3, rr4}
+	v.carryPropagate()
+}
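The interleaved reduction leans entirely on the identity a·2²⁵⁵ + b ≡ a·19 + b (mod p). A minimal standalone sketch, not part of the vendored package, checking it with math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, so a*2^255 + b ≡ a*19 + b (mod p).
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	a := big.NewInt(0xfeedface)
	b := big.NewInt(0xdeadbeef)

	lhs := new(big.Int).Lsh(a, 255) // a * 2^255
	lhs.Add(lhs, b)
	lhs.Mod(lhs, p)

	rhs := new(big.Int).Mul(a, big.NewInt(19)) // a * 19
	rhs.Add(rhs, b)
	rhs.Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true
}
```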
+
+func feSquareGeneric(v, a *Element) {
+	l0 := a.l0
+	l1 := a.l1
+	l2 := a.l2
+	l3 := a.l3
+	l4 := a.l4
+
+	// Squaring works precisely like multiplication above, but thanks to its
+	// symmetry we get to group a few terms together.
+	//
+	//                          l4   l3   l2   l1   l0  x
+	//                          l4   l3   l2   l1   l0  =
+	//                         ------------------------
+	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
+	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
+	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
+	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
+	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
+	//   ----------------------------------------------
+	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
+	//
+	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
+	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
+	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
+	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
+	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
+	//           --------------------------------------
+	//              r4      r3      r2      r1      r0
+	//
+	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+	// only three Mul64 and four Add64, instead of five and eight.
+
+	l0_2 := l0 * 2
+	l1_2 := l1 * 2
+
+	l1_38 := l1 * 38
+	l2_38 := l2 * 38
+	l3_38 := l3 * 38
+
+	l3_19 := l3 * 19
+	l4_19 := l4 * 19
+
+	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+	r0 := mul64(l0, l0)
+	r0 = addMul64(r0, l1_38, l4)
+	r0 = addMul64(r0, l2_38, l3)
+
+	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+	r1 := mul64(l0_2, l1)
+	r1 = addMul64(r1, l2_38, l4)
+	r1 = addMul64(r1, l3_19, l3)
+
+	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+	r2 := mul64(l0_2, l2)
+	r2 = addMul64(r2, l1, l1)
+	r2 = addMul64(r2, l3_38, l4)
+
+	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+	r3 := mul64(l0_2, l3)
+	r3 = addMul64(r3, l1_2, l2)
+	r3 = addMul64(r3, l4_19, l4)
+
+	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+	r4 := mul64(l0_2, l4)
+	r4 = addMul64(r4, l1_2, l3)
+	r4 = addMul64(r4, l2, l2)
+
+	c0 := shiftRightBy51(r0)
+	c1 := shiftRightBy51(r1)
+	c2 := shiftRightBy51(r2)
+	c3 := shiftRightBy51(r3)
+	c4 := shiftRightBy51(r4)
+
+	rr0 := r0.lo&maskLow51Bits + c4*19
+	rr1 := r1.lo&maskLow51Bits + c0
+	rr2 := r2.lo&maskLow51Bits + c1
+	rr3 := r3.lo&maskLow51Bits + c2
+	rr4 := r4.lo&maskLow51Bits + c3
+
+	*v = Element{rr0, rr1, rr2, rr3, rr4}
+	v.carryPropagate()
+}
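The grouping is pure algebra on the column sums. A standalone spot-check of the r1 column with toy limb values:

```go
package main

import "fmt"

func main() {
	// Spot-check of the r1 grouping above:
	// l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) == 2×l0×l1 + 38×l2×l4 + 19×l3×l3.
	l0, l1, l2, l3, l4 := uint64(11), uint64(22), uint64(33), uint64(44), uint64(55)

	full := l0*l1 + l1*l0 + 19*(l2*l4+l3*l3+l4*l2)
	grouped := 2*l0*l1 + 38*l2*l4 + 19*l3*l3

	fmt.Println(full == grouped) // true
}
```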
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b ≡ a * 19 + b mod p) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+	c0 := v.l0 >> 51
+	c1 := v.l1 >> 51
+	c2 := v.l2 >> 51
+	c3 := v.l3 >> 51
+	c4 := v.l4 >> 51
+
+	// c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+	// the final l0 will be at most 52 bits. Similarly for the rest.
+	v.l0 = v.l0&maskLow51Bits + c4*19
+	v.l1 = v.l1&maskLow51Bits + c0
+	v.l2 = v.l2&maskLow51Bits + c1
+	v.l3 = v.l3&maskLow51Bits + c2
+	v.l4 = v.l4&maskLow51Bits + c3
+
+	return v
+}
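The carry pass is small enough to re-derive outside the package. The sketch below is a hypothetical standalone copy of the chain above (carry and eval are demo-only helpers); it checks that one pass preserves the value mod p and leaves every limb under 52 bits:

```go
package main

import (
	"fmt"
	"math/big"
)

const mask51 = (1 << 51) - 1

// carry performs one pass of the carry chain described above (a standalone
// re-derivation for illustration, not the package's method).
func carry(l [5]uint64) [5]uint64 {
	c0, c1, c2, c3, c4 := l[0]>>51, l[1]>>51, l[2]>>51, l[3]>>51, l[4]>>51
	return [5]uint64{
		l[0]&mask51 + c4*19, // the top carry wraps around multiplied by 19
		l[1]&mask51 + c0,
		l[2]&mask51 + c1,
		l[3]&mask51 + c2,
		l[4]&mask51 + c3,
	}
}

// eval reconstructs sum(l[i] * 2^(51i)) mod p.
func eval(l [5]uint64) *big.Int {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))
	v := new(big.Int)
	for i := 4; i >= 0; i-- {
		v.Lsh(v, 51)
		v.Add(v, new(big.Int).SetUint64(l[i]))
	}
	return v.Mod(v, p)
}

func main() {
	l := [5]uint64{1<<52 + 3, 1<<53 + 5, 7, 1<<51 + 1, 1<<54 + 9}
	m := carry(l)
	fmt.Println(eval(m).Cmp(eval(l)) == 0) // true: the value mod p is unchanged
	fmt.Println(m[0] < 1<<52, m[1] < 1<<52, m[2] < 1<<52, m[3] < 1<<52, m[4] < 1<<52)
}
```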
diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go
new file mode 100644
index 0000000..3fd1653
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// A Scalar is an integer modulo
+//
+//	l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+	// s is the scalar in the Montgomery domain, in the format of the
+	// fiat-crypto implementation.
+	s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+//     Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+//     Redistribution and use in source and binary forms, with or without
+//     modification, are permitted provided that the following conditions are
+//     met:
+//
+//         1. Redistributions of source code must retain the above copyright
+//         notice, this list of conditions and the following disclaimer.
+//
+//     THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+//     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+//     THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+//     PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+//     Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+//     EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+//     PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+//     PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+//     LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+//     NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+//     SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+	return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+	// Make a copy of z in case it aliases s.
+	zCopy := new(Scalar).Set(z)
+	return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+	// s = 1 * x + y mod l
+	fiatScalarAdd(&s.s, &x.s, &y.s)
+	return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+	// s = -1 * y + x mod l
+	fiatScalarSub(&s.s, &x.s, &y.s)
+	return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+	// s = -1 * x + 0 mod l
+	fiatScalarOpp(&s.s, &x.s)
+	return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+	// s = x * y + 0 mod l
+	fiatScalarMul(&s.s, &x.s, &y.s)
+	return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+	*s = *x
+	return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+	if len(x) != 64 {
+		return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+	}
+
+	// We have a value x of 512 bits, but our fiatScalarFromBytes function
+	// expects an input lower than l, which is a little over 252 bits.
+	//
+	// Instead of writing a reduction function that operates on wider inputs, we
+	// can interpret x as the sum of three shorter values a, b, and c.
+	//
+	//    x = a + b * 2^168 + c * 2^336  mod l
+	//
+	// We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+	// with two multiplications and two additions.
+
+	s.setShortBytes(x[:21])
+	t := new(Scalar).setShortBytes(x[21:42])
+	s.Add(s, t.Multiply(t, scalarTwo168))
+	t.setShortBytes(x[42:])
+	s.Add(s, t.Multiply(t, scalarTwo336))
+
+	return s, nil
+}
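Because the chunks are 21, 21, and 22 bytes, the split is exact as integers, which is what makes the two-multiplication reduction valid. A standalone math/big sketch (the le helper is a demo-only assumption):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// le converts little-endian bytes to a big.Int (demo helper).
func le(b []byte) *big.Int {
	v := new(big.Int)
	for i := len(b) - 1; i >= 0; i-- {
		v.Lsh(v, 8)
		v.Add(v, big.NewInt(int64(b[i])))
	}
	return v
}

func main() {
	var x [64]byte
	rand.Read(x[:])

	// x = a + b*2^168 + c*2^336 holds exactly, before any reduction mod l,
	// because a and b are exactly 168 bits wide.
	a, b, c := le(x[:21]), le(x[21:42]), le(x[42:])
	sum := new(big.Int).Lsh(b, 168)
	sum.Add(sum, a)
	sum.Add(sum, new(big.Int).Lsh(c, 336))

	fmt.Println(sum.Cmp(le(x[:])) == 0) // true
}
```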
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+	0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+	0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+	if len(x) >= 32 {
+		panic("edwards25519: internal error: setShortBytes called with a long string")
+	}
+	var buf [32]byte
+	copy(buf[:], x)
+	fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+	fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+	return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+	if len(x) != 32 {
+		return nil, errors.New("invalid scalar length")
+	}
+	if !isReduced(x) {
+		return nil, errors.New("invalid scalar encoding")
+	}
+
+	fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+	fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+	return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+	if len(s) != 32 {
+		return false
+	}
+
+	for i := len(s) - 1; i >= 0; i-- {
+		switch {
+		case s[i] > scalarMinusOneBytes[i]:
+			return false
+		case s[i] < scalarMinusOneBytes[i]:
+			return true
+		}
+	}
+	return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+	// The description above omits the purpose of the high bits of the clamping
+	// for brevity, but those are also lost to reductions, and are also
+	// irrelevant to edwards25519 as they protect against a specific
+	// implementation bug that was once observed in a generic Montgomery ladder.
+	if len(x) != 32 {
+		return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+	}
+
+	// We need to use the wide reduction from SetUniformBytes, since clamping
+	// sets the 2^254 bit, making the value higher than the order.
+	var wideBytes [64]byte
+	copy(wideBytes[:], x[:])
+	wideBytes[0] &= 248
+	wideBytes[31] &= 63
+	wideBytes[31] |= 64
+	return s.SetUniformBytes(wideBytes[:])
+}
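Those three bit operations are the entirety of RFC 7748 clamping. A standalone illustration of their effect on an all-ones input:

```go
package main

import "fmt"

func main() {
	// The three clamping operations above, applied to an all-ones input
	// (standalone illustration, not package code).
	var k [32]byte
	for i := range k {
		k[i] = 0xff
	}
	k[0] &= 248 // clear the 3 low bits: the scalar becomes a multiple of 8
	k[31] &= 63 // clear bits 255 and 254 of the little-endian value
	k[31] |= 64 // then set bit 254, pinning the top bit

	fmt.Printf("%08b %08b\n", k[0], k[31]) // 11111000 01111111
}
```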
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var encoded [32]byte
+	return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+	var ss fiatScalarNonMontgomeryDomainFieldElement
+	fiatScalarFromMontgomery(&ss, &s.s)
+	fiatScalarToBytes(out, (*[4]uint64)(&ss))
+	return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+	var diff fiatScalarMontgomeryDomainFieldElement
+	fiatScalarSub(&diff, &s.s, &t.s)
+	var nonzero uint64
+	fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
+	nonzero |= nonzero >> 32
+	nonzero |= nonzero >> 16
+	nonzero |= nonzero >> 8
+	nonzero |= nonzero >> 4
+	nonzero |= nonzero >> 2
+	nonzero |= nonzero >> 1
+	return int(^nonzero) & 1
+}
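Taken together, the exported methods above compose like ordinary modular arithmetic. A small usage sketch of the public API; the le32 helper is a demo-only assumption:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

// le32 encodes a small value as a 32-byte little-endian scalar (demo helper).
func le32(n byte) []byte {
	b := make([]byte, 32)
	b[0] = n
	return b
}

func main() {
	two, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(2))
	three, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(3))
	four, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(4))
	ten, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(10))

	s := edwards25519.NewScalar().MultiplyAdd(two, three, four) // 2*3 + 4
	fmt.Println(s.Equal(ten) == 1)                              // true
}
```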
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+	// This implementation is adapted from the one
+	// in curve25519-dalek and is documented there:
+	// https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+	b := s.Bytes()
+	if b[31] > 127 {
+		panic("scalar has high bit set illegally")
+	}
+	if w < 2 {
+		panic("w must be at least 2 by the definition of NAF")
+	} else if w > 8 {
+		panic("NAF digits must fit in int8")
+	}
+
+	var naf [256]int8
+	var digits [5]uint64
+
+	for i := 0; i < 4; i++ {
+		digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+	}
+
+	width := uint64(1 << w)
+	windowMask := uint64(width - 1)
+
+	pos := uint(0)
+	carry := uint64(0)
+	for pos < 256 {
+		indexU64 := pos / 64
+		indexBit := pos % 64
+		var bitBuf uint64
+		if indexBit < 64-w {
+			// This window's bits are contained in a single uint64
+			bitBuf = digits[indexU64] >> indexBit
+		} else {
+			// Combine the current 64 bits with bits from the next 64
+			bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+		}
+
+		// Add carry into the current window
+		window := carry + (bitBuf & windowMask)
+
+		if window&1 == 0 {
+			// If the window value is even, preserve the carry and continue.
+			// Why is the carry preserved?
+			// If carry == 0 and window & 1 == 0,
+			//    then the next carry should be 0
+			// If carry == 1 and window & 1 == 0,
+			//    then bitBuf & 1 == 1 so the next carry should be 1
+			pos += 1
+			continue
+		}
+
+		if window < width/2 {
+			carry = 0
+			naf[pos] = int8(window)
+		} else {
+			carry = 1
+			naf[pos] = int8(window) - int8(width)
+		}
+
+		pos += w
+	}
+	return naf
+}
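The digit rules can be re-derived with big integers to see what the loop produces: digits are odd or zero, centered in (-2^(w-1), 2^(w-1)), and recombine to the original value. A hypothetical standalone re-derivation, not the package's method:

```go
package main

import (
	"fmt"
	"math/big"
)

// naf returns a width-w non-adjacent form of n (standalone re-derivation of
// the algorithm above, for illustration only).
func naf(n *big.Int, w uint) []int64 {
	var digits []int64
	v := new(big.Int).Set(n)
	width := int64(1) << w
	for v.Sign() > 0 {
		var d int64
		if v.Bit(0) == 1 {
			d = new(big.Int).Mod(v, big.NewInt(width)).Int64()
			if d >= width/2 {
				d -= width // recenter into (-2^(w-1), 2^(w-1))
			}
			v.Sub(v, big.NewInt(d)) // v is now divisible by 2^w
		}
		digits = append(digits, d)
		v.Rsh(v, 1)
	}
	return digits
}

func main() {
	n := big.NewInt(123456789)
	d := naf(n, 5)

	// Reconstruct: sum(d[i] * 2^i) must equal n.
	sum := new(big.Int)
	for i := len(d) - 1; i >= 0; i-- {
		sum.Lsh(sum, 1)
		sum.Add(sum, big.NewInt(d[i]))
	}
	fmt.Println(sum.Cmp(n) == 0) // true
}
```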
+
+func (s *Scalar) signedRadix16() [64]int8 {
+	b := s.Bytes()
+	if b[31] > 127 {
+		panic("scalar has high bit set illegally")
+	}
+
+	var digits [64]int8
+
+	// Compute unsigned radix-16 digits:
+	for i := 0; i < 32; i++ {
+		digits[2*i] = int8(b[i] & 15)
+		digits[2*i+1] = int8((b[i] >> 4) & 15)
+	}
+
+	// Recenter coefficients:
+	for i := 0; i < 63; i++ {
+		carry := (digits[i] + 8) >> 4
+		digits[i] -= carry << 4
+		digits[i+1] += carry
+	}
+
+	return digits
+}
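The recentering carry is easiest to watch on a single byte. A standalone sketch running the same loop shape on 0x9c, whose unsigned digits 12 and 9 both need recentering:

```go
package main

import "fmt"

func main() {
	// The recentering loop above, run on one byte instead of a 32-byte
	// scalar: 0x9c has unsigned radix-16 digits 12 and 9.
	b := byte(0x9c)
	digits := []int8{int8(b & 15), int8(b >> 4), 0}

	for i := 0; i < 2; i++ {
		carry := (digits[i] + 8) >> 4 // 1 when the digit is above 7
		digits[i] -= carry << 4       // move the digit into [-8, 8]
		digits[i+1] += carry          // and push the excess up a position
	}

	// Recompose: sum(digits[i] * 16^i) must equal the original value.
	sum := int(digits[0]) + 16*int(digits[1]) + 256*int(digits[2])
	fmt.Println(sum == int(b), digits) // true [-4 -6 1]
}
```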
diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go
new file mode 100644
index 0000000..2e5782b
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+//   functions synthesized for this Montgomery arithmetic require the
+//
+//   input to be strictly less than the prime modulus (m), and also
+//
+//   require the input to be in the unique saturated representation.
+//
+//   All functions also ensure that these two properties are true of
+//
+//   return values.
+//
+//
+//
+// Computed values:
+//
+//   eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+//   bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+//   twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+//                            if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64   // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+//	out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+//	arg1: [0x0 ~> 0x1]
+//	arg2: [0x0 ~> 0xffffffffffffffff]
+//	arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+//	out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+	x1 := (uint64(arg1) * 0xffffffffffffffff)
+	x2 := ((x1 & arg3) | ((^x1) & arg2))
+	*out1 = x2
+}
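The generated cmov turns the 0/1 flag into an all-zeros or all-ones mask by multiplying it by 2^64-1, then selects without branching. The same trick in isolation, as a hypothetical standalone helper:

```go
package main

import "fmt"

// cmov mirrors fiatScalarCmovznzU64's mask trick: a 0/1 flag times 2^64-1 is
// an all-zeros or all-ones mask, which selects a word without branching.
func cmov(flag, ifZero, ifNonzero uint64) uint64 {
	mask := flag * 0xffffffffffffffff
	return (mask & ifNonzero) | (^mask & ifZero)
}

func main() {
	fmt.Println(cmov(0, 10, 20)) // 10
	fmt.Println(cmov(1, 10, 20)) // 20
}
```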
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//	0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+//	eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+//	0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+	x1 := arg1[1]
+	x2 := arg1[2]
+	x3 := arg1[3]
+	x4 := arg1[0]
+	var x5 uint64
+	var x6 uint64
+	x6, x5 = bits.Mul64(x4, arg2[3])
+	var x7 uint64
+	var x8 uint64
+	x8, x7 = bits.Mul64(x4, arg2[2])
+	var x9 uint64
+	var x10 uint64
+	x10, x9 = bits.Mul64(x4, arg2[1])
+	var x11 uint64
+	var x12 uint64
+	x12, x11 = bits.Mul64(x4, arg2[0])
+	var x13 uint64
+	var x14 uint64
+	x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+	var x15 uint64
+	var x16 uint64
+	x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+	var x17 uint64
+	var x18 uint64
+	x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+	x19 := (uint64(fiatScalarUint1(x18)) + x6)
+	var x20 uint64
+	_, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+	var x22 uint64
+	var x23 uint64
+	x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+	var x24 uint64
+	var x25 uint64
+	x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+	var x26 uint64
+	var x27 uint64
+	x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+	var x28 uint64
+	var x29 uint64
+	x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+	x30 := (uint64(fiatScalarUint1(x29)) + x25)
+	var x32 uint64
+	_, x32 = bits.Add64(x11, x26, uint64(0x0))
+	var x33 uint64
+	var x34 uint64
+	x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+	var x35 uint64
+	var x36 uint64
+	x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+	var x37 uint64
+	var x38 uint64
+	x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+	var x39 uint64
+	var x40 uint64
+	x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+	var x41 uint64
+	var x42 uint64
+	x42, x41 = bits.Mul64(x1, arg2[3])
+	var x43 uint64
+	var x44 uint64
+	x44, x43 = bits.Mul64(x1, arg2[2])
+	var x45 uint64
+	var x46 uint64
+	x46, x45 = bits.Mul64(x1, arg2[1])
+	var x47 uint64
+	var x48 uint64
+	x48, x47 = bits.Mul64(x1, arg2[0])
+	var x49 uint64
+	var x50 uint64
+	x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+	var x51 uint64
+	var x52 uint64
+	x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+	var x53 uint64
+	var x54 uint64
+	x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+	x55 := (uint64(fiatScalarUint1(x54)) + x42)
+	var x56 uint64
+	var x57 uint64
+	x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+	var x58 uint64
+	var x59 uint64
+	x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+	var x60 uint64
+	var x61 uint64
+	x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+	var x62 uint64
+	var x63 uint64
+	x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+	var x64 uint64
+	var x65 uint64
+	x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+	var x66 uint64
+	_, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+	var x68 uint64
+	var x69 uint64
+	x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+	var x70 uint64
+	var x71 uint64
+	x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+	var x72 uint64
+	var x73 uint64
+	x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+	var x74 uint64
+	var x75 uint64
+	x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+	x76 := (uint64(fiatScalarUint1(x75)) + x71)
+	var x78 uint64
+	_, x78 = bits.Add64(x56, x72, uint64(0x0))
+	var x79 uint64
+	var x80 uint64
+	x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+	var x81 uint64
+	var x82 uint64
+	x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+	var x83 uint64
+	var x84 uint64
+	x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+	var x85 uint64
+	var x86 uint64
+	x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+	x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+	var x88 uint64
+	var x89 uint64
+	x89, x88 = bits.Mul64(x2, arg2[3])
+	var x90 uint64
+	var x91 uint64
+	x91, x90 = bits.Mul64(x2, arg2[2])
+	var x92 uint64
+	var x93 uint64
+	x93, x92 = bits.Mul64(x2, arg2[1])
+	var x94 uint64
+	var x95 uint64
+	x95, x94 = bits.Mul64(x2, arg2[0])
+	var x96 uint64
+	var x97 uint64
+	x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+	var x98 uint64
+	var x99 uint64
+	x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+	var x100 uint64
+	var x101 uint64
+	x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+	x102 := (uint64(fiatScalarUint1(x101)) + x89)
+	var x103 uint64
+	var x104 uint64
+	x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+	var x105 uint64
+	var x106 uint64
+	x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+	var x107 uint64
+	var x108 uint64
+	x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+	var x109 uint64
+	var x110 uint64
+	x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+	var x111 uint64
+	var x112 uint64
+	x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+	var x113 uint64
+	_, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+	var x115 uint64
+	var x116 uint64
+	x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+	var x117 uint64
+	var x118 uint64
+	x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+	var x119 uint64
+	var x120 uint64
+	x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+	var x121 uint64
+	var x122 uint64
+	x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+	x123 := (uint64(fiatScalarUint1(x122)) + x118)
+	var x125 uint64
+	_, x125 = bits.Add64(x103, x119, uint64(0x0))
+	var x126 uint64
+	var x127 uint64
+	x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+	var x128 uint64
+	var x129 uint64
+	x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+	var x130 uint64
+	var x131 uint64
+	x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+	var x132 uint64
+	var x133 uint64
+	x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+	x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+	var x135 uint64
+	var x136 uint64
+	x136, x135 = bits.Mul64(x3, arg2[3])
+	var x137 uint64
+	var x138 uint64
+	x138, x137 = bits.Mul64(x3, arg2[2])
+	var x139 uint64
+	var x140 uint64
+	x140, x139 = bits.Mul64(x3, arg2[1])
+	var x141 uint64
+	var x142 uint64
+	x142, x141 = bits.Mul64(x3, arg2[0])
+	var x143 uint64
+	var x144 uint64
+	x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+	var x145 uint64
+	var x146 uint64
+	x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+	var x147 uint64
+	var x148 uint64
+	x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+	x149 := (uint64(fiatScalarUint1(x148)) + x136)
+	var x150 uint64
+	var x151 uint64
+	x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+	var x152 uint64
+	var x153 uint64
+	x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+	var x154 uint64
+	var x155 uint64
+	x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+	var x156 uint64
+	var x157 uint64
+	x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+	var x158 uint64
+	var x159 uint64
+	x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+	var x160 uint64
+	_, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+	var x162 uint64
+	var x163 uint64
+	x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+	var x164 uint64
+	var x165 uint64
+	x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+	var x166 uint64
+	var x167 uint64
+	x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+	var x168 uint64
+	var x169 uint64
+	x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+	x170 := (uint64(fiatScalarUint1(x169)) + x165)
+	var x172 uint64
+	_, x172 = bits.Add64(x150, x166, uint64(0x0))
+	var x173 uint64
+	var x174 uint64
+	x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+	var x175 uint64
+	var x176 uint64
+	x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+	var x177 uint64
+	var x178 uint64
+	x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+	var x179 uint64
+	var x180 uint64
+	x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+	x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+	var x182 uint64
+	var x183 uint64
+	x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x184 uint64
+	var x185 uint64
+	x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+	var x186 uint64
+	var x187 uint64
+	x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+	var x188 uint64
+	var x189 uint64
+	x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+	var x191 uint64
+	_, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+	var x192 uint64
+	fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+	var x193 uint64
+	fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+	var x194 uint64
+	fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+	var x195 uint64
+	fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+	out1[0] = x192
+	out1[1] = x193
+	out1[2] = x194
+	out1[3] = x195
+}
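The contract of the word-by-word Montgomery product is compact: with R = 2^256, it maps (aR mod l, bR mod l) to abR mod l. A standalone math/big sketch of that contract, independent of the generated code:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l is the modulus m quoted in the header above.
	l, _ := new(big.Int).SetString(
		"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16)
	R := new(big.Int).Lsh(big.NewInt(1), 256)

	// Montgomery forms of a = 5 and b = 7.
	a, b := big.NewInt(5), big.NewInt(7)
	aR := new(big.Int).Mul(a, R)
	aR.Mod(aR, l)
	bR := new(big.Int).Mul(b, R)
	bR.Mod(bR, l)

	// A Montgomery multiply computes x*y*R^-1 mod l.
	Rinv := new(big.Int).ModInverse(R, l)
	prod := new(big.Int).Mul(aR, bR)
	prod.Mul(prod, Rinv)
	prod.Mod(prod, l)

	// The result is (a*b) = 35 in Montgomery form.
	want := new(big.Int).Mul(big.NewInt(35), R)
	want.Mod(want, l)
	fmt.Println(prod.Cmp(want) == 0) // true
}
```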
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//	0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+//	eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+//	0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+	var x9 uint64
+	var x10 uint64
+	x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x11 uint64
+	var x12 uint64
+	x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+	var x13 uint64
+	var x14 uint64
+	x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+	var x15 uint64
+	var x16 uint64
+	x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+	var x18 uint64
+	_, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+	var x19 uint64
+	fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+	var x20 uint64
+	fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+	var x21 uint64
+	fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+	var x22 uint64
+	fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+	out1[0] = x19
+	out1[1] = x20
+	out1[2] = x21
+	out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//	0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+//	eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+//	0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+	var x9 uint64
+	fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+	var x12 uint64
+	var x13 uint64
+	x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+	var x16 uint64
+	x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+	out1[0] = x10
+	out1[1] = x12
+	out1[2] = x14
+	out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+//	eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+//	0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+	var x9 uint64
+	fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+	var x12 uint64
+	var x13 uint64
+	x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+	var x16 uint64
+	x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+	out1[0] = x10
+	out1[1] = x12
+	out1[2] = x14
+	out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+//	out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+//	arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+//	out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+	x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+	*out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+//	eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+//	0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+	x1 := arg1[0]
+	var x2 uint64
+	_, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+	var x4 uint64
+	var x5 uint64
+	x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+	var x6 uint64
+	var x7 uint64
+	x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+	var x8 uint64
+	var x9 uint64
+	x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+	var x13 uint64
+	_, x13 = bits.Add64(x1, x8, uint64(0x0))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+	var x16 uint64
+	var x17 uint64
+	x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+	var x18 uint64
+	_, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+	var x20 uint64
+	var x21 uint64
+	x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+	var x22 uint64
+	var x23 uint64
+	x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+	var x24 uint64
+	var x25 uint64
+	x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+	var x26 uint64
+	var x27 uint64
+	x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+	var x29 uint64
+	_, x29 = bits.Add64(x16, x24, uint64(0x0))
+	var x30 uint64
+	var x31 uint64
+	x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+	var x32 uint64
+	var x33 uint64
+	x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+	var x34 uint64
+	var x35 uint64
+	x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+	var x36 uint64
+	var x37 uint64
+	x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+	var x38 uint64
+	var x39 uint64
+	x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+	var x40 uint64
+	var x41 uint64
+	x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+	var x42 uint64
+	_, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+	var x44 uint64
+	var x45 uint64
+	x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+	var x46 uint64
+	var x47 uint64
+	x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+	var x48 uint64
+	var x49 uint64
+	x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+	var x50 uint64
+	var x51 uint64
+	x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+	var x53 uint64
+	_, x53 = bits.Add64(x36, x48, uint64(0x0))
+	var x54 uint64
+	var x55 uint64
+	x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+	var x56 uint64
+	var x57 uint64
+	x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+	var x58 uint64
+	var x59 uint64
+	x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+	var x60 uint64
+	var x61 uint64
+	x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+	var x62 uint64
+	var x63 uint64
+	x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+	var x64 uint64
+	var x65 uint64
+	x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+	var x66 uint64
+	_, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+	var x68 uint64
+	var x69 uint64
+	x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+	var x70 uint64
+	var x71 uint64
+	x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+	var x72 uint64
+	var x73 uint64
+	x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+	var x74 uint64
+	var x75 uint64
+	x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+	var x77 uint64
+	_, x77 = bits.Add64(x60, x72, uint64(0x0))
+	var x78 uint64
+	var x79 uint64
+	x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+	var x80 uint64
+	var x81 uint64
+	x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+	var x82 uint64
+	var x83 uint64
+	x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+	x84 := (uint64(fiatScalarUint1(x83)) + x69)
+	var x85 uint64
+	var x86 uint64
+	x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x87 uint64
+	var x88 uint64
+	x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+	var x89 uint64
+	var x90 uint64
+	x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+	var x91 uint64
+	var x92 uint64
+	x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+	var x94 uint64
+	_, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+	var x95 uint64
+	fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+	var x96 uint64
+	fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+	var x97 uint64
+	fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+	var x98 uint64
+	fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+	out1[0] = x95
+	out1[1] = x96
+	out1[2] = x97
+	out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+//	eval (from_montgomery out1) mod m = eval arg1 mod m
+//	0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+	x1 := arg1[1]
+	x2 := arg1[2]
+	x3 := arg1[3]
+	x4 := arg1[0]
+	var x5 uint64
+	var x6 uint64
+	x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+	var x7 uint64
+	var x8 uint64
+	x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+	var x9 uint64
+	var x10 uint64
+	x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+	var x11 uint64
+	var x12 uint64
+	x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+	var x13 uint64
+	var x14 uint64
+	x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+	var x15 uint64
+	var x16 uint64
+	x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+	var x17 uint64
+	var x18 uint64
+	x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+	var x19 uint64
+	_, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+	var x21 uint64
+	var x22 uint64
+	x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+	var x23 uint64
+	var x24 uint64
+	x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+	var x25 uint64
+	var x26 uint64
+	x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+	var x27 uint64
+	var x28 uint64
+	x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+	var x30 uint64
+	_, x30 = bits.Add64(x11, x25, uint64(0x0))
+	var x31 uint64
+	var x32 uint64
+	x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+	var x33 uint64
+	var x34 uint64
+	x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+	var x35 uint64
+	var x36 uint64
+	x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+	var x37 uint64
+	var x38 uint64
+	x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+	var x39 uint64
+	var x40 uint64
+	x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+	var x41 uint64
+	var x42 uint64
+	x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+	var x43 uint64
+	var x44 uint64
+	x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+	var x45 uint64
+	var x46 uint64
+	x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+	var x47 uint64
+	var x48 uint64
+	x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+	var x49 uint64
+	var x50 uint64
+	x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+	var x51 uint64
+	var x52 uint64
+	x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+	var x53 uint64
+	var x54 uint64
+	x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+	var x55 uint64
+	var x56 uint64
+	x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+	var x57 uint64
+	var x58 uint64
+	x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+	var x59 uint64
+	_, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+	var x61 uint64
+	var x62 uint64
+	x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+	var x63 uint64
+	var x64 uint64
+	x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+	var x65 uint64
+	var x66 uint64
+	x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+	var x67 uint64
+	var x68 uint64
+	x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+	var x70 uint64
+	_, x70 = bits.Add64(x51, x65, uint64(0x0))
+	var x71 uint64
+	var x72 uint64
+	x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+	var x73 uint64
+	var x74 uint64
+	x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+	var x75 uint64
+	var x76 uint64
+	x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+	var x77 uint64
+	var x78 uint64
+	x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+	var x79 uint64
+	var x80 uint64
+	x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+	var x81 uint64
+	var x82 uint64
+	x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+	var x83 uint64
+	var x84 uint64
+	x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+	var x85 uint64
+	var x86 uint64
+	x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+	var x87 uint64
+	var x88 uint64
+	x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+	var x89 uint64
+	var x90 uint64
+	x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+	var x91 uint64
+	var x92 uint64
+	x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+	var x93 uint64
+	var x94 uint64
+	x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+	var x95 uint64
+	var x96 uint64
+	x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+	var x97 uint64
+	var x98 uint64
+	x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+	var x99 uint64
+	_, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+	var x101 uint64
+	var x102 uint64
+	x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+	var x103 uint64
+	var x104 uint64
+	x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+	var x105 uint64
+	var x106 uint64
+	x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+	var x107 uint64
+	var x108 uint64
+	x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+	var x110 uint64
+	_, x110 = bits.Add64(x91, x105, uint64(0x0))
+	var x111 uint64
+	var x112 uint64
+	x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+	var x113 uint64
+	var x114 uint64
+	x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+	var x115 uint64
+	var x116 uint64
+	x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+	var x117 uint64
+	var x118 uint64
+	x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+	var x119 uint64
+	var x120 uint64
+	x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+	var x121 uint64
+	var x122 uint64
+	x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+	var x123 uint64
+	var x124 uint64
+	x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+	var x125 uint64
+	var x126 uint64
+	x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+	var x127 uint64
+	var x128 uint64
+	x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+	var x129 uint64
+	var x130 uint64
+	x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+	var x131 uint64
+	var x132 uint64
+	x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+	var x133 uint64
+	var x134 uint64
+	x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+	var x135 uint64
+	var x136 uint64
+	x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+	var x137 uint64
+	var x138 uint64
+	x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+	var x139 uint64
+	_, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+	var x141 uint64
+	var x142 uint64
+	x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+	var x143 uint64
+	var x144 uint64
+	x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+	var x145 uint64
+	var x146 uint64
+	x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+	var x147 uint64
+	var x148 uint64
+	x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+	var x150 uint64
+	_, x150 = bits.Add64(x131, x145, uint64(0x0))
+	var x151 uint64
+	var x152 uint64
+	x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+	var x153 uint64
+	var x154 uint64
+	x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+	var x155 uint64
+	var x156 uint64
+	x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+	x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+	var x158 uint64
+	var x159 uint64
+	x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x160 uint64
+	var x161 uint64
+	x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+	var x162 uint64
+	var x163 uint64
+	x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+	var x164 uint64
+	var x165 uint64
+	x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+	var x167 uint64
+	_, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+	var x168 uint64
+	fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+	var x169 uint64
+	fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+	var x170 uint64
+	fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+	var x171 uint64
+	fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+	out1[0] = x168
+	out1[1] = x169
+	out1[2] = x170
+	out1[3] = x171
+}
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+//	0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+//	out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+//	arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+//	out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+	x1 := arg1[3]
+	x2 := arg1[2]
+	x3 := arg1[1]
+	x4 := arg1[0]
+	x5 := (uint8(x4) & 0xff)
+	x6 := (x4 >> 8)
+	x7 := (uint8(x6) & 0xff)
+	x8 := (x6 >> 8)
+	x9 := (uint8(x8) & 0xff)
+	x10 := (x8 >> 8)
+	x11 := (uint8(x10) & 0xff)
+	x12 := (x10 >> 8)
+	x13 := (uint8(x12) & 0xff)
+	x14 := (x12 >> 8)
+	x15 := (uint8(x14) & 0xff)
+	x16 := (x14 >> 8)
+	x17 := (uint8(x16) & 0xff)
+	x18 := uint8((x16 >> 8))
+	x19 := (uint8(x3) & 0xff)
+	x20 := (x3 >> 8)
+	x21 := (uint8(x20) & 0xff)
+	x22 := (x20 >> 8)
+	x23 := (uint8(x22) & 0xff)
+	x24 := (x22 >> 8)
+	x25 := (uint8(x24) & 0xff)
+	x26 := (x24 >> 8)
+	x27 := (uint8(x26) & 0xff)
+	x28 := (x26 >> 8)
+	x29 := (uint8(x28) & 0xff)
+	x30 := (x28 >> 8)
+	x31 := (uint8(x30) & 0xff)
+	x32 := uint8((x30 >> 8))
+	x33 := (uint8(x2) & 0xff)
+	x34 := (x2 >> 8)
+	x35 := (uint8(x34) & 0xff)
+	x36 := (x34 >> 8)
+	x37 := (uint8(x36) & 0xff)
+	x38 := (x36 >> 8)
+	x39 := (uint8(x38) & 0xff)
+	x40 := (x38 >> 8)
+	x41 := (uint8(x40) & 0xff)
+	x42 := (x40 >> 8)
+	x43 := (uint8(x42) & 0xff)
+	x44 := (x42 >> 8)
+	x45 := (uint8(x44) & 0xff)
+	x46 := uint8((x44 >> 8))
+	x47 := (uint8(x1) & 0xff)
+	x48 := (x1 >> 8)
+	x49 := (uint8(x48) & 0xff)
+	x50 := (x48 >> 8)
+	x51 := (uint8(x50) & 0xff)
+	x52 := (x50 >> 8)
+	x53 := (uint8(x52) & 0xff)
+	x54 := (x52 >> 8)
+	x55 := (uint8(x54) & 0xff)
+	x56 := (x54 >> 8)
+	x57 := (uint8(x56) & 0xff)
+	x58 := (x56 >> 8)
+	x59 := (uint8(x58) & 0xff)
+	x60 := uint8((x58 >> 8))
+	out1[0] = x5
+	out1[1] = x7
+	out1[2] = x9
+	out1[3] = x11
+	out1[4] = x13
+	out1[5] = x15
+	out1[6] = x17
+	out1[7] = x18
+	out1[8] = x19
+	out1[9] = x21
+	out1[10] = x23
+	out1[11] = x25
+	out1[12] = x27
+	out1[13] = x29
+	out1[14] = x31
+	out1[15] = x32
+	out1[16] = x33
+	out1[17] = x35
+	out1[18] = x37
+	out1[19] = x39
+	out1[20] = x41
+	out1[21] = x43
+	out1[22] = x45
+	out1[23] = x46
+	out1[24] = x47
+	out1[25] = x49
+	out1[26] = x51
+	out1[27] = x53
+	out1[28] = x55
+	out1[29] = x57
+	out1[30] = x59
+	out1[31] = x60
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+//	0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+//	eval out1 mod m = bytes_eval arg1 mod m
+//	0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+//	arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+//	out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+	x1 := (uint64(arg1[31]) << 56)
+	x2 := (uint64(arg1[30]) << 48)
+	x3 := (uint64(arg1[29]) << 40)
+	x4 := (uint64(arg1[28]) << 32)
+	x5 := (uint64(arg1[27]) << 24)
+	x6 := (uint64(arg1[26]) << 16)
+	x7 := (uint64(arg1[25]) << 8)
+	x8 := arg1[24]
+	x9 := (uint64(arg1[23]) << 56)
+	x10 := (uint64(arg1[22]) << 48)
+	x11 := (uint64(arg1[21]) << 40)
+	x12 := (uint64(arg1[20]) << 32)
+	x13 := (uint64(arg1[19]) << 24)
+	x14 := (uint64(arg1[18]) << 16)
+	x15 := (uint64(arg1[17]) << 8)
+	x16 := arg1[16]
+	x17 := (uint64(arg1[15]) << 56)
+	x18 := (uint64(arg1[14]) << 48)
+	x19 := (uint64(arg1[13]) << 40)
+	x20 := (uint64(arg1[12]) << 32)
+	x21 := (uint64(arg1[11]) << 24)
+	x22 := (uint64(arg1[10]) << 16)
+	x23 := (uint64(arg1[9]) << 8)
+	x24 := arg1[8]
+	x25 := (uint64(arg1[7]) << 56)
+	x26 := (uint64(arg1[6]) << 48)
+	x27 := (uint64(arg1[5]) << 40)
+	x28 := (uint64(arg1[4]) << 32)
+	x29 := (uint64(arg1[3]) << 24)
+	x30 := (uint64(arg1[2]) << 16)
+	x31 := (uint64(arg1[1]) << 8)
+	x32 := arg1[0]
+	x33 := (x31 + uint64(x32))
+	x34 := (x30 + x33)
+	x35 := (x29 + x34)
+	x36 := (x28 + x35)
+	x37 := (x27 + x36)
+	x38 := (x26 + x37)
+	x39 := (x25 + x38)
+	x40 := (x23 + uint64(x24))
+	x41 := (x22 + x40)
+	x42 := (x21 + x41)
+	x43 := (x20 + x42)
+	x44 := (x19 + x43)
+	x45 := (x18 + x44)
+	x46 := (x17 + x45)
+	x47 := (x15 + uint64(x16))
+	x48 := (x14 + x47)
+	x49 := (x13 + x48)
+	x50 := (x12 + x49)
+	x51 := (x11 + x50)
+	x52 := (x10 + x51)
+	x53 := (x9 + x52)
+	x54 := (x7 + uint64(x8))
+	x55 := (x6 + x54)
+	x56 := (x5 + x55)
+	x57 := (x4 + x56)
+	x58 := (x3 + x57)
+	x59 := (x2 + x58)
+	x60 := (x1 + x59)
+	out1[0] = x39
+	out1[1] = x46
+	out1[2] = x53
+	out1[3] = x60
+}
diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go
new file mode 100644
index 0000000..f7ca3ce
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from (256^i) * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+	basepointTablePrecomp.initOnce.Do(func() {
+		p := NewGeneratorPoint()
+		for i := 0; i < 32; i++ {
+			basepointTablePrecomp.table[i].FromP3(p)
+			for j := 0; j < 8; j++ {
+				p.Add(p, p)
+			}
+		}
+	})
+	return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+	table    [32]affineLookupTable
+	initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+	basepointTable := basepointTable()
+
+	// Write x = sum(x_i * 16^i) so  x*B = sum( B*x_i*16^i )
+	// as described in the Ed25519 paper
+	//
+	// Group even and odd coefficients
+	// x*B     = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+	//         + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+	// x*B     = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+	//    + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+	//
+	// We use a lookup table for each i to get x_i*16^(2*i)*B
+	// and do four doublings to multiply by 16.
+	digits := x.signedRadix16()
+
+	multiple := &affineCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+
+	// Accumulate the odd components first
+	v.Set(NewIdentityPoint())
+	for i := 1; i < 64; i += 2 {
+		basepointTable[i/2].SelectInto(multiple, digits[i])
+		tmp1.AddAffine(v, multiple)
+		v.fromP1xP1(tmp1)
+	}
+
+	// Multiply by 16
+	tmp2.FromP3(v)       // tmp2 =    v in P2 coords
+	tmp1.Double(tmp2)    // tmp1 =  2*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 =  2*v in P2 coords
+	tmp1.Double(tmp2)    // tmp1 =  4*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 =  4*v in P2 coords
+	tmp1.Double(tmp2)    // tmp1 =  8*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 =  8*v in P2 coords
+	tmp1.Double(tmp2)    // tmp1 = 16*v in P1xP1 coords
+	v.fromP1xP1(tmp1)    // now v = 16*(odd components)
+
+	// Accumulate the even components
+	for i := 0; i < 64; i += 2 {
+		basepointTable[i/2].SelectInto(multiple, digits[i])
+		tmp1.AddAffine(v, multiple)
+		v.fromP1xP1(tmp1)
+	}
+
+	return v
+}
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+	checkInitialized(q)
+
+	var table projLookupTable
+	table.FromP3(q)
+
+	// Write x = sum(x_i * 16^i)
+	// so  x*Q = sum( Q*x_i*16^i )
+	//         = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+	//           <------compute inside out---------
+	//
+	// We use the lookup table to get the x_i*Q values
+	// and do four doublings to compute 16*Q
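+	//
+	// E.g. with digits x_0 = 5, x_1 = 3 (so x = 53), the loop computes
+	// 16*(3*Q) and then adds 5*Q, giving 53*Q.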
+	digits := x.signedRadix16()
+
+	// Unwrap first loop iteration to save computing 16*identity
+	multiple := &projCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	table.SelectInto(multiple, digits[63])
+
+	v.Set(NewIdentityPoint())
+	tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+	for i := 62; i >= 0; i-- {
+		tmp2.FromP1xP1(tmp1) // tmp2 =    (prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  2*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  2*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  4*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  4*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  8*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  8*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 = 16*(prev) in P1xP1 coords
+		v.fromP1xP1(tmp1)    //    v = 16*(prev) in P3 coords
+		table.SelectInto(multiple, digits[i])
+		tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+	}
+	v.fromP1xP1(tmp1)
+	return v
+}
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+	basepointNafTablePrecomp.initOnce.Do(func() {
+		basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+	})
+	return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+	table    nafLookupTable8
+	initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+	checkInitialized(A)
+
+	// Similarly to the single variable-base approach, we compute
+	// digits and use them with a lookup table.  However, because
+	// we are allowed to do variable-time operations, we don't
+	// need constant-time lookups or constant-time digit
+	// computations.
+	//
+	// So we use a non-adjacent form of some width w instead of
+	// radix 16.  This is like a binary representation (one digit
+	// for each binary place) but we allow the digits to grow in
+	// magnitude up to 2^{w-1} so that the nonzero digits are as
+	// sparse as possible.  Intuitively, this "condenses" the
+	// "mass" of the scalar onto sparse coefficients (meaning
+	// fewer additions).
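+	//
+	// For example, 255 = 2^8 - 1 has eight nonzero bits in binary, but
+	// its NAF is just +1 at bit 8 and -1 at bit 0: two table lookups
+	// instead of eight.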
+
+	basepointNafTable := basepointNafTable()
+	var aTable nafLookupTable5
+	aTable.FromP3(A)
+	// Because the basepoint is fixed, we can use a wider NAF
+	// corresponding to a bigger table.
+	aNaf := a.nonAdjacentForm(5)
+	bNaf := b.nonAdjacentForm(8)
+
+	// Find the first nonzero coefficient, so the main loop below can skip
+	// the run of leading zero digits. (If a and b are both zero, i ends at
+	// -1, the loop body never runs, and v is set to the identity.)
+	i := 255
+	for ; i >= 0; i-- {
+		if aNaf[i] != 0 || bNaf[i] != 0 {
+			break
+		}
+	}
+
+	multA := &projCached{}
+	multB := &affineCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	tmp2.Zero()
+
+	// Move from high to low bits, doubling the accumulator
+	// at each iteration and checking whether there is a nonzero
+	// coefficient to look up a multiple of.
+	for ; i >= 0; i-- {
+		tmp1.Double(tmp2)
+
+		// Only update v if we have a nonzero coeff to add in.
+		if aNaf[i] > 0 {
+			v.fromP1xP1(tmp1)
+			aTable.SelectInto(multA, aNaf[i])
+			tmp1.Add(v, multA)
+		} else if aNaf[i] < 0 {
+			v.fromP1xP1(tmp1)
+			aTable.SelectInto(multA, -aNaf[i])
+			tmp1.Sub(v, multA)
+		}
+
+		if bNaf[i] > 0 {
+			v.fromP1xP1(tmp1)
+			basepointNafTable.SelectInto(multB, bNaf[i])
+			tmp1.AddAffine(v, multB)
+		} else if bNaf[i] < 0 {
+			v.fromP1xP1(tmp1)
+			basepointNafTable.SelectInto(multB, -bNaf[i])
+			tmp1.SubAffine(v, multB)
+		}
+
+		tmp2.FromP1xP1(tmp1)
+	}
+
+	v.fromP2(tmp2)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go
new file mode 100644
index 0000000..83234bb
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+	"crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+	points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+	points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+	points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+	points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+	v.points[0].FromP3(q)
+	tmpP3 := Point{}
+	tmpP1xP1 := projP1xP1{}
+	for i := 0; i < 7; i++ {
+		// Compute (i+1)*Q as Q + i*Q and convert to a projCached
+		// This is needlessly complicated because the API has explicit
+		// receivers instead of creating stack objects and relying on RVO
+		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+	}
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+	v.points[0].FromP3(q)
+	tmpP3 := Point{}
+	tmpP1xP1 := projP1xP1{}
+	for i := 0; i < 7; i++ {
+		// Compute (i+1)*Q as Q + i*Q and convert to affineCached
+		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+	}
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+	// Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+	// This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+	v.points[0].FromP3(q)
+	q2 := Point{}
+	q2.Add(q, q)
+	tmpP3 := Point{}
+	tmpP1xP1 := projP1xP1{}
+	for i := 0; i < 7; i++ {
+		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+	}
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+	v.points[0].FromP3(q)
+	q2 := Point{}
+	q2.Add(q, q)
+	tmpP3 := Point{}
+	tmpP1xP1 := projP1xP1{}
+	for i := 0; i < 63; i++ {
+		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+	}
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+	// Compute xabs = |x|
+	xmask := x >> 7
+	xabs := uint8((x + xmask) ^ xmask)
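+	// For negative x, xmask is all ones and (x + xmask) ^ xmask is the
+	// two's-complement negation: e.g. x = -3 gives xmask = -1 and
+	// xabs = uint8((-3 - 1) ^ -1) = 3. For x >= 0, xabs is just x.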
+
+	dest.Zero()
+	for j := 1; j <= 8; j++ {
+		// Set dest = j*Q if |x| = j
+		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+		dest.Select(&v.points[j-1], dest, cond)
+	}
+	// Now dest = |x|*Q, conditionally negate to get x*Q
+	dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+	// Compute xabs = |x|
+	xmask := x >> 7
+	xabs := uint8((x + xmask) ^ xmask)
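+	// (Same constant-time |x| computation as in projLookupTable.SelectInto.)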
+
+	dest.Zero()
+	for j := 1; j <= 8; j++ {
+		// Set dest = j*Q if |x| = j
+		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+		dest.Select(&v.points[j-1], dest, cond)
+	}
+	// Now dest = |x|*Q, conditionally negate to get x*Q
+	dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, set dest to x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
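+	// points[i] holds (2*i+1)*Q, so odd x lives at index (x-1)/2 = x/2.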
+	*dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, set dest to x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
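+	// As above, points[i] holds (2*i+1)*Q, so odd x maps to index x/2.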
+	*dest = v.points[x/2]
+}