author     Emile <git@emile.space>  2024-10-25 15:55:50 +0200
committer  Emile <git@emile.space>  2024-10-25 15:55:50 +0200
commit     c90f36e3dd179d2de96f4f5fe38d8dc9a9de6dfe (patch)
tree       89e9afb41c5bf76f48cfb09305a2d3db8d302b06 /vendor/filippo.io/edwards25519/field
parent     98bbb0f559a8883bc47bae80607dbe326a448e61 (diff)
vendor (HEAD, main)
Diffstat (limited to 'vendor/filippo.io/edwards25519/field')
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe.go              | 420
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64.go        |  16
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64.s         | 379
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go  |  12
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64.go        |  16
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64.s         |  42
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go  |  12
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_extra.go        |  50
-rw-r--r--  vendor/filippo.io/edwards25519/field/fe_generic.go      | 266
9 files changed, 1213 insertions, 0 deletions
diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
new file mode 100644
index 0000000..5518ef2
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+	"crypto/subtle"
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+	// An element t represents the integer
+	//     t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+	//
+	// Between operations, all limbs are expected to be lower than 2^52.
+	l0 uint64
+	l1 uint64
+	l2 uint64
+	l3 uint64
+	l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
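As a concrete illustration of the limb layout above, the following sketch (illustrative only, not part of the vendored file; it uses the unexported limbs and maskLow51Bits, so it would only compile inside this package) spells out which limb values encode a few constants:

// Illustrative values in the radix-2^51 representation. Each limb l_i
// contributes l_i * 2^(51*i) to the represented integer.
var (
	two      = Element{2, 0, 0, 0, 0} // the integer 2
	twoTo51  = Element{0, 1, 0, 0, 0} // 2^51
	twoTo102 = Element{0, 0, 1, 0, 0} // 2^102
	// p - 1 = 2^255 - 20, with every limb at its canonical maximum.
	pMinusOne = Element{maskLow51Bits - 19, maskLow51Bits, maskLow51Bits, maskLow51Bits, maskLow51Bits}
)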
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+	*v = *feZero
+	return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+	*v = *feOne
+	return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+	v.carryPropagate()
+
+	// After the light reduction we now have a field element representation
+	// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+	// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+	// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+	c := (v.l0 + 19) >> 51
+	c = (v.l1 + c) >> 51
+	c = (v.l2 + c) >> 51
+	c = (v.l3 + c) >> 51
+	c = (v.l4 + c) >> 51
+
+	// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+	// effectively applying the reduction identity to the carry.
+	v.l0 += 19 * c
+
+	v.l1 += v.l0 >> 51
+	v.l0 = v.l0 & maskLow51Bits
+	v.l2 += v.l1 >> 51
+	v.l1 = v.l1 & maskLow51Bits
+	v.l3 += v.l2 >> 51
+	v.l2 = v.l2 & maskLow51Bits
+	v.l4 += v.l3 >> 51
+	v.l3 = v.l3 & maskLow51Bits
+	// no additional carry
+	v.l4 = v.l4 & maskLow51Bits
+
+	return v
+}
+
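The carry chain above is a branch-free way of asking whether v >= p. A small standalone sketch (hypothetical helper, not part of the package; it assumes canonical limbs below 2^51 and an import of math/big) replays the trick and confirms it against a direct comparison:

// gePCheck replays the add-19-and-inspect-the-carry trick on five 51-bit
// limbs and compares it against a direct big.Int comparison with
// p = 2^255 - 19. It returns true for any input with limbs below 2^51.
func gePCheck(l [5]uint64) bool {
	c := (l[0] + 19) >> 51
	c = (l[1] + c) >> 51
	c = (l[2] + c) >> 51
	c = (l[3] + c) >> 51
	c = (l[4] + c) >> 51 // c == 1 exactly when the limbs encode a value >= p

	v := new(big.Int)
	for i := 4; i >= 0; i-- {
		v.Lsh(v, 51)
		v.Add(v, new(big.Int).SetUint64(l[i]))
	}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	return (c == 1) == (v.Cmp(p) >= 0)
}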
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+	v.l0 = a.l0 + b.l0
+	v.l1 = a.l1 + b.l1
+	v.l2 = a.l2 + b.l2
+	v.l3 = a.l3 + b.l3
+	v.l4 = a.l4 + b.l4
+	// Using the generic implementation here is actually faster than the
+	// assembly. Probably because the body of this function is so simple that
+	// the compiler can figure out better optimizations by inlining the carry
+	// propagation.
+	return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+	// We first add 2 * p, to guarantee the subtraction won't underflow, and
+	// then subtract b (which can be up to 2^255 + 2^13 * 19).
+	v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+	v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+	v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+	v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+	v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+	return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+	return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+	// Inversion is implemented as exponentiation with exponent p − 2. It uses the
+	// same sequence of 255 squarings and 11 multiplications as [Curve25519].
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+	z2.Square(z)             // 2
+	t.Square(&z2)            // 4
+	t.Square(&t)             // 8
+	z9.Multiply(&t, z)       // 9
+	z11.Multiply(&z9, &z2)   // 11
+	t.Square(&z11)           // 22
+	z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+	t.Square(&z2_5_0) // 2^6 - 2^1
+	for i := 0; i < 4; i++ {
+		t.Square(&t) // 2^10 - 2^5
+	}
+	z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+	t.Square(&z2_10_0) // 2^11 - 2^1
+	for i := 0; i < 9; i++ {
+		t.Square(&t) // 2^20 - 2^10
+	}
+	z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+	t.Square(&z2_20_0) // 2^21 - 2^1
+	for i := 0; i < 19; i++ {
+		t.Square(&t) // 2^40 - 2^20
+	}
+	t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+	t.Square(&t) // 2^41 - 2^1
+	for i := 0; i < 9; i++ {
+		t.Square(&t) // 2^50 - 2^10
+	}
+	z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+	t.Square(&z2_50_0) // 2^51 - 2^1
+	for i := 0; i < 49; i++ {
+		t.Square(&t) // 2^100 - 2^50
+	}
+	z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+	t.Square(&z2_100_0) // 2^101 - 2^1
+	for i := 0; i < 99; i++ {
+		t.Square(&t) // 2^200 - 2^100
+	}
+	t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+	t.Square(&t) // 2^201 - 2^1
+	for i := 0; i < 49; i++ {
+		t.Square(&t) // 2^250 - 2^50
+	}
+	t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+	t.Square(&t) // 2^251 - 2^1
+	t.Square(&t) // 2^252 - 2^2
+	t.Square(&t) // 2^253 - 2^3
+	t.Square(&t) // 2^254 - 2^4
+	t.Square(&t) // 2^255 - 2^5
+
+	return v.Multiply(&t, &z11) // 2^255 - 21
+}
+
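A minimal usage sketch for the arithmetic API above (not part of the vendored code; it assumes the package is imported from its canonical path, filippo.io/edwards25519/field):

package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

func main() {
	nine := make([]byte, 32)
	nine[0] = 9
	a, _ := new(field.Element).SetBytes(nine) // a = 9

	// a * a^-1 == 1
	inv := new(field.Element).Invert(a)
	prod := new(field.Element).Multiply(a, inv)
	fmt.Println(prod.Equal(new(field.Element).One()) == 1) // true

	// a - a == 0
	diff := new(field.Element).Subtract(a, a)
	fmt.Println(diff.Equal(new(field.Element).Zero()) == 1) // true
}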
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+	*v = *a
+	return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+	if len(x) != 32 {
+		return nil, errors.New("edwards25519: invalid field element input size")
+	}
+
+	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+	v.l0 = binary.LittleEndian.Uint64(x[0:8])
+	v.l0 &= maskLow51Bits
+	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+	v.l1 &= maskLow51Bits
+	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+	v.l2 &= maskLow51Bits
+	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+	v.l3 &= maskLow51Bits
+	// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+	// Note: not bytes 25:33, shift 4, to avoid overread.
+	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+	v.l4 &= maskLow51Bits
+
+	return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var out [32]byte
+	return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+	t := *v
+	t.reduce()
+
+	var buf [8]byte
+	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+		bitsOffset := i * 51
+		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+		for i, bb := range buf {
+			off := bitsOffset/8 + i
+			if off >= len(out) {
+				break
+			}
+			out[off] |= bb
+		}
+	}
+
+	return out[:]
+}
+
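A round-trip sketch (illustrative, same import assumptions as the example above): SetBytes accepts the non-canonical encoding of p + 1, and Bytes reduces it, so the canonical encoding of 1 comes back out.

func canonicalRoundTrip() []byte {
	// Little-endian encoding of p + 1 = 2^255 - 18 (non-canonical but accepted).
	pPlusOne := []byte{
		0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
	}
	v, err := new(field.Element).SetBytes(pPlusOne)
	if err != nil {
		panic(err) // cannot happen: the slice is exactly 32 bytes
	}
	return v.Bytes() // 0x01 followed by 31 zero bytes
}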
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+	sa, sv := u.Bytes(), v.Bytes()
+	return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+	m := mask64Bits(cond)
+	v.l0 = (m & a.l0) | (^m & b.l0)
+	v.l1 = (m & a.l1) | (^m & b.l1)
+	v.l2 = (m & a.l2) | (^m & b.l2)
+	v.l3 = (m & a.l3) | (^m & b.l3)
+	v.l4 = (m & a.l4) | (^m & b.l4)
+	return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+	m := mask64Bits(cond)
+	t := m & (v.l0 ^ u.l0)
+	v.l0 ^= t
+	u.l0 ^= t
+	t = m & (v.l1 ^ u.l1)
+	v.l1 ^= t
+	u.l1 ^= t
+	t = m & (v.l2 ^ u.l2)
+	v.l2 ^= t
+	u.l2 ^= t
+	t = m & (v.l3 ^ u.l3)
+	v.l3 ^= t
+	u.l3 ^= t
+	t = m & (v.l4 ^ u.l4)
+	v.l4 ^= t
+	u.l4 ^= t
+}
+
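A sketch of how Select and Swap are meant to be driven by a secret bit (illustrative, not part of the package; cond must be exactly 0 or 1, which is the caller's responsibility):

// ladderStepSelect swaps the working pair when secretBit == 1 and then picks
// one of the two elements, all without secret-dependent branches.
func ladderStepSelect(x2, x3 *field.Element, secretBit int) *field.Element {
	x2.Swap(x3, secretBit)                              // swapped only when secretBit == 1
	return new(field.Element).Select(x2, x3, secretBit) // x2 when 1, x3 when 0
}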
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+	return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+	return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+	feMul(v, x, y)
+	return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+	feSquare(v, x)
+	return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+	x0lo, x0hi := mul51(x.l0, y)
+	x1lo, x1hi := mul51(x.l1, y)
+	x2lo, x2hi := mul51(x.l2, y)
+	x3lo, x3hi := mul51(x.l3, y)
+	x4lo, x4hi := mul51(x.l4, y)
+	v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+	v.l1 = x1lo + x0hi
+	v.l2 = x2lo + x1hi
+	v.l3 = x3lo + x2hi
+	v.l4 = x4lo + x3hi
+	// The hi portions are going to be only 32 bits, plus any previous excess,
+	// so we can skip the carry propagation.
+	return v
+}
+
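A quick sanity sketch for Mult32 (illustrative): multiplying by a small scalar agrees with repeated addition.

func mult32AgreesWithAdd(x *field.Element) bool {
	byThree := new(field.Element).Mult32(x, 3)
	sum := new(field.Element).Add(x, x)
	sum.Add(sum, x)
	return byThree.Equal(sum) == 1 // always true
}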
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+	mh, ml := bits.Mul64(a, uint64(b))
+	lo = ml & maskLow51Bits
+	hi = (mh << 13) | (ml >> 51)
+	return
+}
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+	var t0, t1, t2 Element
+
+	t0.Square(x)             // x^2
+	t1.Square(&t0)           // x^4
+	t1.Square(&t1)           // x^8
+	t1.Multiply(x, &t1)      // x^9
+	t0.Multiply(&t0, &t1)    // x^11
+	t0.Square(&t0)           // x^22
+	t0.Multiply(&t1, &t0)    // x^31
+	t1.Square(&t0)           // x^62
+	for i := 1; i < 5; i++ { // x^992
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // x^1023 -> 1023 = 2^10 - 1
+	t1.Square(&t0)            // 2^11 - 2
+	for i := 1; i < 10; i++ { // 2^20 - 2^10
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)     // 2^20 - 1
+	t2.Square(&t1)            // 2^21 - 2
+	for i := 1; i < 20; i++ { // 2^40 - 2^20
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^40 - 1
+	t1.Square(&t1)            // 2^41 - 2
+	for i := 1; i < 10; i++ { // 2^50 - 2^10
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^50 - 1
+	t1.Square(&t0)            // 2^51 - 2
+	for i := 1; i < 50; i++ { // 2^100 - 2^50
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)      // 2^100 - 1
+	t2.Square(&t1)             // 2^101 - 2
+	for i := 1; i < 100; i++ { // 2^200 - 2^100
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^200 - 1
+	t1.Square(&t1)            // 2^201 - 2
+	for i := 1; i < 50; i++ { // 2^250 - 2^50
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^250 - 1
+	t0.Square(&t0)            // 2^251 - 2
+	t0.Square(&t0)            // 2^252 - 4
+	return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+	2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+	t0 := new(Element)
+
+	// r = (u * v3) * (u * v7)^((p-5)/8)
+	v2 := new(Element).Square(v)
+	uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+	uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+	rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+	check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+	uNeg := new(Element).Negate(u)
+	correctSignSqrt := check.Equal(u)
+	flippedSignSqrt := check.Equal(uNeg)
+	flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+	rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+	// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+	rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+	r.Absolute(rr) // Choose the nonnegative square root.
+	return r, correctSignSqrt | flippedSignSqrt
+}
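A usage sketch for SqrtRatio (illustrative, same import assumptions as the earlier examples): compute sqrt(u/v) and report whether the ratio was actually a square.

func sqrtOfRatio(u, v *field.Element) (*field.Element, bool) {
	r := new(field.Element)
	_, wasSquare := r.SqrtRatio(u, v)
	return r, wasSquare == 1
}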
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
new file mode 100644
index 0000000..edcf163
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go
@@ -0,0 +1,16 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
new file mode 100644
index 0000000..293f013
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s
@@ -0,0 +1,379 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+	MOVQ a+8(FP), CX
+	MOVQ b+16(FP), BX
+
+	// r0 = a0×b0
+	MOVQ (CX), AX
+	MULQ (BX)
+	MOVQ AX, DI
+	MOVQ DX, SI
+
+	// r0 += 19×a1×b4
+	MOVQ   8(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a2×b3
+	MOVQ   16(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a3×b2
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   16(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r0 += 19×a4×b1
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   8(BX)
+	ADDQ   AX, DI
+	ADCQ   DX, SI
+
+	// r1 = a0×b1
+	MOVQ (CX), AX
+	MULQ 8(BX)
+	MOVQ AX, R9
+	MOVQ DX, R8
+
+	// r1 += a1×b0
+	MOVQ 8(CX), AX
+	MULQ (BX)
+	ADDQ AX, R9
+	ADCQ DX, R8
+
+	// r1 += 19×a2×b4
+	MOVQ   16(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r1 += 19×a3×b3
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r1 += 19×a4×b2
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   16(BX)
+	ADDQ   AX, R9
+	ADCQ   DX, R8
+
+	// r2 = a0×b2
+	MOVQ (CX), AX
+	MULQ 16(BX)
+	MOVQ AX, R11
+	MOVQ DX, R10
+
+	// r2 += a1×b1
+	MOVQ 8(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R11
+	ADCQ DX, R10
+
+	// r2 += a2×b0
+	MOVQ 16(CX), AX
+	MULQ (BX)
+	ADDQ AX, R11
+	ADCQ DX, R10
+
+	// r2 += 19×a3×b4
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R11
+	ADCQ   DX, R10
+
+	// r2 += 19×a4×b3
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(BX)
+	ADDQ   AX, R11
+	ADCQ   DX, R10
+
+	// r3 = a0×b3
+	MOVQ (CX), AX
+	MULQ 24(BX)
+	MOVQ AX, R13
+	MOVQ DX, R12
+
+	// r3 += a1×b2
+	MOVQ 8(CX), AX
+	MULQ 16(BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += a2×b1
+	MOVQ 16(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += a3×b0
+	MOVQ 24(CX), AX
+	MULQ (BX)
+	ADDQ AX, R13
+	ADCQ DX, R12
+
+	// r3 += 19×a4×b4
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(BX)
+	ADDQ   AX, R13
+	ADCQ   DX, R12
+
+	// r4 = a0×b4
+	MOVQ (CX), AX
+	MULQ 32(BX)
+	MOVQ AX, R15
+	MOVQ DX, R14
+
+	// r4 += a1×b3
+	MOVQ 8(CX), AX
+	MULQ 24(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a2×b2
+	MOVQ 16(CX), AX
+	MULQ 16(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a3×b1
+	MOVQ 24(CX), AX
+	MULQ 8(BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// r4 += a4×b0
+	MOVQ 32(CX), AX
+	MULQ (BX)
+	ADDQ AX, R15
+	ADCQ DX, R14
+
+	// First reduction chain
+	MOVQ   $0x0007ffffffffffff, AX
+	SHLQ   $0x0d, DI, SI
+	SHLQ   $0x0d, R9, R8
+	SHLQ   $0x0d, R11, R10
+	SHLQ   $0x0d, R13, R12
+	SHLQ   $0x0d, R15, R14
+	ANDQ   AX, DI
+	IMUL3Q $0x13, R14, R14
+	ADDQ   R14, DI
+	ANDQ   AX, R9
+	ADDQ   SI, R9
+	ANDQ   AX, R11
+	ADDQ   R8, R11
+	ANDQ   AX, R13
+	ADDQ   R10, R13
+	ANDQ   AX, R15
+	ADDQ   R12, R15
+
+	// Second reduction chain (carryPropagate)
+	MOVQ   DI, SI
+	SHRQ   $0x33, SI
+	MOVQ   R9, R8
+	SHRQ   $0x33, R8
+	MOVQ   R11, R10
+	SHRQ   $0x33, R10
+	MOVQ   R13, R12
+	SHRQ   $0x33, R12
+	MOVQ   R15, R14
+	SHRQ   $0x33, R14
+	ANDQ   AX, DI
+	IMUL3Q $0x13, R14, R14
+	ADDQ   R14, DI
+	ANDQ   AX, R9
+	ADDQ   SI, R9
+	ANDQ   AX, R11
+	ADDQ   R8, R11
+	ANDQ   AX, R13
+	ADDQ   R10, R13
+	ANDQ   AX, R15
+	ADDQ   R12, R15
+
+	// Store output
+	MOVQ out+0(FP), AX
+	MOVQ DI, (AX)
+	MOVQ R9, 8(AX)
+	MOVQ R11, 16(AX)
+	MOVQ R13, 24(AX)
+	MOVQ R15, 32(AX)
+	RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+	MOVQ a+8(FP), CX
+
+	// r0 = l0×l0
+	MOVQ (CX), AX
+	MULQ (CX)
+	MOVQ AX, SI
+	MOVQ DX, BX
+
+	// r0 += 38×l1×l4
+	MOVQ   8(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, SI
+	ADCQ   DX, BX
+
+	// r0 += 38×l2×l3
+	MOVQ   16(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, SI
+	ADCQ   DX, BX
+
+	// r1 = 2×l0×l1
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 8(CX)
+	MOVQ AX, R8
+	MOVQ DX, DI
+
+	// r1 += 38×l2×l4
+	MOVQ   16(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R8
+	ADCQ   DX, DI
+
+	// r1 += 19×l3×l3
+	MOVQ   24(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, R8
+	ADCQ   DX, DI
+
+	// r2 = 2×l0×l2
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 16(CX)
+	MOVQ AX, R10
+	MOVQ DX, R9
+
+	// r2 += l1×l1
+	MOVQ 8(CX), AX
+	MULQ 8(CX)
+	ADDQ AX, R10
+	ADCQ DX, R9
+
+	// r2 += 38×l3×l4
+	MOVQ   24(CX), AX
+	IMUL3Q $0x26, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R10
+	ADCQ   DX, R9
+
+	// r3 = 2×l0×l3
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 24(CX)
+	MOVQ AX, R12
+	MOVQ DX, R11
+
+	// r3 += 2×l1×l2
+	MOVQ   8(CX), AX
+	IMUL3Q $0x02, AX, AX
+	MULQ   16(CX)
+	ADDQ   AX, R12
+	ADCQ   DX, R11
+
+	// r3 += 19×l4×l4
+	MOVQ   32(CX), AX
+	IMUL3Q $0x13, AX, AX
+	MULQ   32(CX)
+	ADDQ   AX, R12
+	ADCQ   DX, R11
+
+	// r4 = 2×l0×l4
+	MOVQ (CX), AX
+	SHLQ $0x01, AX
+	MULQ 32(CX)
+	MOVQ AX, R14
+	MOVQ DX, R13
+
+	// r4 += 2×l1×l3
+	MOVQ   8(CX), AX
+	IMUL3Q $0x02, AX, AX
+	MULQ   24(CX)
+	ADDQ   AX, R14
+	ADCQ   DX, R13
+
+	// r4 += l2×l2
+	MOVQ 16(CX), AX
+	MULQ 16(CX)
+	ADDQ AX, R14
+	ADCQ DX, R13
+
+	// First reduction chain
+	MOVQ   $0x0007ffffffffffff, AX
+	SHLQ   $0x0d, SI, BX
+	SHLQ   $0x0d, R8, DI
+	SHLQ   $0x0d, R10, R9
+	SHLQ   $0x0d, R12, R11
+	SHLQ   $0x0d, R14, R13
+	ANDQ   AX, SI
+	IMUL3Q $0x13, R13, R13
+	ADDQ   R13, SI
+	ANDQ   AX, R8
+	ADDQ   BX, R8
+	ANDQ   AX, R10
+	ADDQ   DI, R10
+	ANDQ   AX, R12
+	ADDQ   R9, R12
+	ANDQ   AX, R14
+	ADDQ   R11, R14
+
+	// Second reduction chain (carryPropagate)
+	MOVQ   SI, BX
+	SHRQ   $0x33, BX
+	MOVQ   R8, DI
+	SHRQ   $0x33, DI
+	MOVQ   R10, R9
+	SHRQ   $0x33, R9
+	MOVQ   R12, R11
+	SHRQ   $0x33, R11
+	MOVQ   R14, R13
+	SHRQ   $0x33, R13
+	ANDQ   AX, SI
+	IMUL3Q $0x13, R13, R13
+	ADDQ   R13, SI
+	ANDQ   AX, R8
+	ADDQ   BX, R8
+	ANDQ   AX, R10
+	ADDQ   DI, R10
+	ANDQ   AX, R12
+	ADDQ   R9, R12
+	ANDQ   AX, R14
+	ADDQ   R11, R14
+
+	// Store output
+	MOVQ out+0(FP), AX
+	MOVQ SI, (AX)
+	MOVQ R8, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R12, 24(AX)
+	MOVQ R14, 32(AX)
+	RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
new file mode 100644
index 0000000..ddb6c9b
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+// +build !amd64 !gc purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
new file mode 100644
index 0000000..af459ef
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+// +build arm64,gc,!purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+	carryPropagate(v)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
new file mode 100644
index 0000000..3126a43
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+	MOVD v+0(FP), R20
+
+	LDP 0(R20), (R0, R1)
+	LDP 16(R20), (R2, R3)
+	MOVD 32(R20), R4
+
+	AND $0x7ffffffffffff, R0, R10
+	AND $0x7ffffffffffff, R1, R11
+	AND $0x7ffffffffffff, R2, R12
+	AND $0x7ffffffffffff, R3, R13
+	AND $0x7ffffffffffff, R4, R14
+
+	ADD R0>>51, R11, R11
+	ADD R1>>51, R12, R12
+	ADD R2>>51, R13, R13
+	ADD R3>>51, R14, R14
+	// R4>>51 * 19 + R10 -> R10
+	LSR $51, R4, R21
+	MOVD $19, R22
+	MADD R22, R10, R21, R10
+
+	STP (R10, R11), 0(R20)
+	STP (R12, R13), 16(R20)
+	MOVD R14, 32(R20)
+
+	RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
new file mode 100644
index 0000000..234a5b2
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+// +build !arm64 !gc purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+	return v.carryPropagateGeneric()
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
new file mode 100644
index 0000000..1ef503b
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+	if len(x) != 64 {
+		return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+	}
+
+	// Split the 64 bytes into two elements, and extract the most significant
+	// bit of each, which is ignored by SetBytes.
+	lo, _ := new(Element).SetBytes(x[:32])
+	loMSB := uint64(x[31] >> 7)
+	hi, _ := new(Element).SetBytes(x[32:])
+	hiMSB := uint64(x[63] >> 7)
+
+	// The output we want is
+	//
+	//   v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+	//
+	// which applying the reduction identity comes out to
+	//
+	//   v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+	//
+	// l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
+	// (loMSB * 19), a 57 bits value (hi.l0 * 2 * 19), and a 10 bits value
+	// (hiMSB * 2 * 19²), so it fits in a uint64.
+
+	v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+	v.l1 = lo.l1 + hi.l1*2*19
+	v.l2 = lo.l2 + hi.l2*2*19
+	v.l3 = lo.l3 + hi.l3*2*19
+	v.l4 = lo.l4 + hi.l4*2*19
+
+	return v.carryPropagate(), nil
+}
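A typical use of SetWideBytes is reducing a 64-byte hash output to a field element. An illustrative sketch (not part of the package; assumes crypto/sha512 and the field package are imported):

func hashToElement(msg []byte) *field.Element {
	h := sha512.Sum512(msg) // 64 bytes
	v, err := new(field.Element).SetWideBytes(h[:])
	if err != nil {
		panic(err) // cannot happen: h[:] is exactly 64 bytes
	}
	return v
}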
diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
new file mode 100644
index 0000000..86f5fd9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+	lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	lo, c := bits.Add64(lo, v.lo, 0)
+	hi, _ = bits.Add64(hi, v.hi, c)
+	return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+	return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+	a0 := a.l0
+	a1 := a.l1
+	a2 := a.l2
+	a3 := a.l3
+	a4 := a.l4
+
+	b0 := b.l0
+	b1 := b.l1
+	b2 := b.l2
+	b3 := b.l3
+	b4 := b.l4
+
+	// Limb multiplication works like pen-and-paper columnar multiplication, but
+	// with 51-bit limbs instead of digits.
+	//
+	//                          a4   a3   a2   a1   a0  x
+	//                          b4   b3   b2   b1   b0  =
+	//                         ------------------------
+	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
+	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
+	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
+	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
+	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
+	//   ----------------------------------------------
+	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
+	//
+	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+	//
+	// Reduction can be carried out simultaneously to multiplication. For
+	// example, we do not compute r5: whenever the result of a multiplication
+	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+	//
+	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
+	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
+	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
+	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
+	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
+	//           --------------------------------------
+	//              r4      r3      r2      r1      r0
+	//
+	// Finally we add up the columns into wide, overlapping limbs.
+
+	a1_19 := a1 * 19
+	a2_19 := a2 * 19
+	a3_19 := a3 * 19
+	a4_19 := a4 * 19
+
+	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+	r0 := mul64(a0, b0)
+	r0 = addMul64(r0, a1_19, b4)
+	r0 = addMul64(r0, a2_19, b3)
+	r0 = addMul64(r0, a3_19, b2)
+	r0 = addMul64(r0, a4_19, b1)
+
+	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+	r1 := mul64(a0, b1)
+	r1 = addMul64(r1, a1, b0)
+	r1 = addMul64(r1, a2_19, b4)
+	r1 = addMul64(r1, a3_19, b3)
+	r1 = addMul64(r1, a4_19, b2)
+
+	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+	r2 := mul64(a0, b2)
+	r2 = addMul64(r2, a1, b1)
+	r2 = addMul64(r2, a2, b0)
+	r2 = addMul64(r2, a3_19, b4)
+	r2 = addMul64(r2, a4_19, b3)
+
+	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+	r3 := mul64(a0, b3)
+	r3 = addMul64(r3, a1, b2)
+	r3 = addMul64(r3, a2, b1)
+	r3 = addMul64(r3, a3, b0)
+	r3 = addMul64(r3, a4_19, b4)
+
+	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+	r4 := mul64(a0, b4)
+	r4 = addMul64(r4, a1, b3)
+	r4 = addMul64(r4, a2, b2)
+	r4 = addMul64(r4, a3, b1)
+	r4 = addMul64(r4, a4, b0)
+
+	// After the multiplication, we need to reduce (carry) the five coefficients
+	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+	// to respect the Element invariant.
+	//
+	// Overall, the reduction works the same as carryPropagate, except with
+	// wider inputs: we take the carry for each coefficient by shifting it right
+	// by 51, and add it to the limb above it. The top carry is multiplied by 19
+	// according to the reduction identity and added to the lowest limb.
+	//
+	// The largest coefficient (r0) will be at most 111 bits, which guarantees
+	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+	//
+	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+	//     r0 < 2⁷ × 2⁵² × 2⁵²
+	//     r0 < 2¹¹¹
+	//
+	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+	// allows us to easily apply the reduction identity.
+	//
+	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+	//     r4 < 5 × 2⁵² × 2⁵²
+	//     r4 < 2¹⁰⁷
+	//
+
+	c0 := shiftRightBy51(r0)
+	c1 := shiftRightBy51(r1)
+	c2 := shiftRightBy51(r2)
+	c3 := shiftRightBy51(r3)
+	c4 := shiftRightBy51(r4)
+
+	rr0 := r0.lo&maskLow51Bits + c4*19
+	rr1 := r1.lo&maskLow51Bits + c0
+	rr2 := r2.lo&maskLow51Bits + c1
+	rr3 := r3.lo&maskLow51Bits + c2
+	rr4 := r4.lo&maskLow51Bits + c3
+
+	// Now all coefficients fit into 64-bit registers but are still too large to
+	// be passed around as an Element. We therefore do one last carry chain,
+	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+	*v = Element{rr0, rr1, rr2, rr3, rr4}
+	v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+	l0 := a.l0
+	l1 := a.l1
+	l2 := a.l2
+	l3 := a.l3
+	l4 := a.l4
+
+	// Squaring works precisely like multiplication above, but thanks to its
+	// symmetry we get to group a few terms together.
+	//
+	//                          l4   l3   l2   l1   l0  x
+	//                          l4   l3   l2   l1   l0  =
+	//                         ------------------------
+	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
+	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
+	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
+	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
+	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
+	//   ----------------------------------------------
+	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
+	//
+	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
+	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
+	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
+	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
+	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
+	//           --------------------------------------
+	//              r4      r3      r2      r1      r0
+	//
+	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+	// only three Mul64 and four Add64, instead of five and eight.
+
+	l0_2 := l0 * 2
+	l1_2 := l1 * 2
+
+	l1_38 := l1 * 38
+	l2_38 := l2 * 38
+	l3_38 := l3 * 38
+
+	l3_19 := l3 * 19
+	l4_19 := l4 * 19
+
+	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+	r0 := mul64(l0, l0)
+	r0 = addMul64(r0, l1_38, l4)
+	r0 = addMul64(r0, l2_38, l3)
+
+	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+	r1 := mul64(l0_2, l1)
+	r1 = addMul64(r1, l2_38, l4)
+	r1 = addMul64(r1, l3_19, l3)
+
+	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+	r2 := mul64(l0_2, l2)
+	r2 = addMul64(r2, l1, l1)
+	r2 = addMul64(r2, l3_38, l4)
+
+	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+	r3 := mul64(l0_2, l3)
+	r3 = addMul64(r3, l1_2, l2)
+	r3 = addMul64(r3, l4_19, l4)
+
+	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+	r4 := mul64(l0_2, l4)
+	r4 = addMul64(r4, l1_2, l3)
+	r4 = addMul64(r4, l2, l2)
+
+	c0 := shiftRightBy51(r0)
+	c1 := shiftRightBy51(r1)
+	c2 := shiftRightBy51(r2)
+	c3 := shiftRightBy51(r3)
+	c4 := shiftRightBy51(r4)
+
+	rr0 := r0.lo&maskLow51Bits + c4*19
+	rr1 := r1.lo&maskLow51Bits + c0
+	rr2 := r2.lo&maskLow51Bits + c1
+	rr3 := r3.lo&maskLow51Bits + c2
+	rr4 := r4.lo&maskLow51Bits + c3
+
+	*v = Element{rr0, rr1, rr2, rr3, rr4}
+	v.carryPropagate()
+}
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+	c0 := v.l0 >> 51
+	c1 := v.l1 >> 51
+	c2 := v.l2 >> 51
+	c3 := v.l3 >> 51
+	c4 := v.l4 >> 51
+
+	// c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+	// the final l0 will be at most 52 bits. Similarly for the rest.
+	v.l0 = v.l0&maskLow51Bits + c4*19
+	v.l1 = v.l1&maskLow51Bits + c0
+	v.l2 = v.l2&maskLow51Bits + c1
+	v.l3 = v.l3&maskLow51Bits + c2
+	v.l4 = v.l4&maskLow51Bits + c3
+
+	return v
+}
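The reduction identity that carryPropagateGeneric (and feMulGeneric above) relies on can be checked directly with math/big. An illustrative sketch, not part of the package:

// reductionIdentityHolds verifies a*2^255 + b ≡ a*19 + b (mod 2^255 - 19)
// for arbitrary non-negative a and b.
func reductionIdentityHolds(a, b *big.Int) bool {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	lhs := new(big.Int).Add(new(big.Int).Lsh(a, 255), b)
	rhs := new(big.Int).Add(new(big.Int).Mul(a, big.NewInt(19)), b)
	return lhs.Mod(lhs, p).Cmp(rhs.Mod(rhs, p)) == 0 // always true
}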