+
/* SHA-256 logical functions and round primitives (FIPS 180-4, section 4.1.2).
 * All operate on 32-bit words; a right-rotation by n is spelled
 * (x >> n | x << (32 - n)). */

/* Ch(x,y,z) = (x AND y) XOR (NOT x AND z), written branch-free as a select. */
#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
/* Maj(x,y,z) = bitwise majority of the three inputs. */
#define Maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y))))
/* Big Sigma functions used by the round function: ROTR 2,13,22 and ROTR 6,11,25. */
#define Sigma0(x) (((x) >> 2 | (x) << 30) ^ ((x) >> 13 | (x) << 19) ^ ((x) >> 22 | (x) << 10))
#define Sigma1(x) (((x) >> 6 | (x) << 26) ^ ((x) >> 11 | (x) << 21) ^ ((x) >> 25 | (x) << 7))
/* Small sigma functions used by the message schedule:
 * ROTR 7,18 plus SHR 3, and ROTR 17,19 plus SHR 10 (the last term is a plain shift). */
#define sigma0(x) (((x) >> 7 | (x) << 25) ^ ((x) >> 18 | (x) << 14) ^ ((x) >> 3))
#define sigma1(x) (((x) >> 17 | (x) << 15) ^ ((x) >> 19 | (x) << 13) ^ ((x) >> 10))

/* One SHA-256 round. Callers rotate the roles of the eight state variables
 * between invocations instead of shuffling values, so only d and h are
 * written here. Multi-statement macro wrapped in do { } while(0) so it is
 * safe as a single statement. */
#define Round(a,b,c,d,e,f,g,h,k,w) do { \
    uint32_t t1 = (h) + Sigma1(e) + Ch((e), (f), (g)) + (k) + (w); \
    uint32_t t2 = Sigma0(a) + Maj((a), (b), (c)); \
    (d) += t1; \
    (h) = t1 + t2; \
} while(0)

/* Convert a 32-bit word between host order and big endian
 * (identity on big-endian hosts). */
#ifdef WORDS_BIGENDIAN
#define BE32(x) (x)
#else
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#endif
+
+static void secp256k1_sha256_initialize(secp256k1_sha256 *hash) {
+ hash->s[0] = 0x6a09e667ul;
+ hash->s[1] = 0xbb67ae85ul;
+ hash->s[2] = 0x3c6ef372ul;
+ hash->s[3] = 0xa54ff53aul;
+ hash->s[4] = 0x510e527ful;
+ hash->s[5] = 0x9b05688cul;
+ hash->s[6] = 0x1f83d9abul;
+ hash->s[7] = 0x5be0cd19ul;
+ hash->bytes = 0;
+}
+
/** Perform one SHA-256 transformation (compression), processing one 64-byte
 *  chunk given as 16 big endian 32-bit words, and folding it into state s. */
static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
    uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;

    /* Rounds 0-15: consume the 16 input words (converted to host order).
     * Each Round call rotates the roles of a..h so no values need moving. */
    Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0]));
    Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1]));
    Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2]));
    Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3]));
    Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4]));
    Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5]));
    Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6]));
    Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7]));
    Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8]));
    Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9]));
    Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10]));
    Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11]));
    Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12]));
    Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13]));
    Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14]));
    Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15]));

    /* Rounds 16-31: the message schedule is extended in place, reusing the
     * 16-word window: w[t] = w[t-16] + sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]). */
    Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, b, c, d, e, f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, a, b, c, d, e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, h, a, b, c, d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, g, h, a, b, c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, f, g, h, a, b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, e, f, g, h, a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, d, e, f, g, h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, c, d, e, f, g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, b, c, d, e, f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, a, b, c, d, e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, h, a, b, c, d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, g, h, a, b, c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, f, g, h, a, b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, e, f, g, h, a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0));

    /* Rounds 32-47. */
    Round(a, b, c, d, e, f, g, h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, c, d, e, f, g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, a, b, c, d, e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, h, a, b, c, d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, g, h, a, b, c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, f, g, h, a, b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, e, f, g, h, a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, d, e, f, g, h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, c, d, e, f, g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, b, c, d, e, f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, a, b, c, d, e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, h, a, b, c, d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, g, h, a, b, c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, f, g, h, a, b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, e, f, g, h, a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0));

    /* Rounds 48-63. The last two schedule values use a plain '+' instead of
     * '+=' on purpose: w14 and w15 are never read again after these rounds. */
    Round(a, b, c, d, e, f, g, h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1));
    Round(h, a, b, c, d, e, f, g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2));
    Round(g, h, a, b, c, d, e, f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3));
    Round(f, g, h, a, b, c, d, e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4));
    Round(e, f, g, h, a, b, c, d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5));
    Round(d, e, f, g, h, a, b, c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6));
    Round(c, d, e, f, g, h, a, b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7));
    Round(b, c, d, e, f, g, h, a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8));
    Round(a, b, c, d, e, f, g, h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9));
    Round(h, a, b, c, d, e, f, g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10));
    Round(g, h, a, b, c, d, e, f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11));
    Round(f, g, h, a, b, c, d, e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12));
    Round(e, f, g, h, a, b, c, d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13));
    Round(d, e, f, g, h, a, b, c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14));
    Round(c, d, e, f, g, h, a, b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15));
    Round(b, c, d, e, f, g, h, a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0));

    /* Fold the compressed chunk into the running state. */
    s[0] += a;
    s[1] += b;
    s[2] += c;
    s[3] += d;
    s[4] += e;
    s[5] += f;
    s[6] += g;
    s[7] += h;
}
+
/** Absorb len bytes of data into the hash, buffering partial 64-byte blocks
 *  in hash->buf and running the compression function on each full block. */
static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t len) {
    size_t bufsize = hash->bytes & 0x3F; /* bytes already buffered (total mod 64) */
    hash->bytes += len;
    while (bufsize + len >= 64) {
        /* Fill the buffer, and process it. */
        size_t chunk_len = 64 - bufsize;
        memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len);
        data += chunk_len;
        len -= chunk_len;
        secp256k1_sha256_transform(hash->s, hash->buf);
        bufsize = 0; /* after the first iteration, whole blocks are consumed */
    }
    if (len) {
        /* Fill the buffer with what remains. */
        memcpy(((unsigned char*)hash->buf) + bufsize, data, len);
    }
}
+
/** Finish the hash: append the SHA-256 padding and 64-bit bit-length, write
 *  the digest to out32 as 32 big endian bytes, and zero the state words.
 *  The context must be re-initialized before reuse. */
static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out32) {
    /* Padding block: a single 0x80 byte followed by zeros. */
    static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    uint32_t sizedesc[2];
    uint32_t out[8];
    int i = 0;
    /* Capture the message length in bits (big endian, 64-bit split into two
     * words) BEFORE the padding writes below advance hash->bytes. */
    sizedesc[0] = BE32(hash->bytes >> 29); /* high word: (bytes * 8) >> 32 */
    sizedesc[1] = BE32(hash->bytes << 3);  /* low word:  (bytes * 8) & 0xFFFFFFFF */
    /* Pad so the total written length is == 56 (mod 64), leaving exactly 8
     * bytes of room in the final block for the length field appended next. */
    secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
    secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8);
    for (i = 0; i < 8; i++) {
        out[i] = BE32(hash->s[i]);
        hash->s[i] = 0; /* wipe the state so the digest leaves no residue */
    }
    memcpy(out32, (const unsigned char*)out, 32);
}
+
/** Initialize an HMAC-SHA256 computation (RFC 2104) with the given key.
 *  Sets up the inner hash over (key XOR ipad) and the outer hash over
 *  (key XOR opad). */
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
    size_t n;
    unsigned char rkey[64]; /* the key padded/reduced to one SHA-256 block */
    if (keylen <= sizeof(rkey)) {
        memcpy(rkey, key, keylen);
        memset(rkey + keylen, 0, sizeof(rkey) - keylen);
    } else {
        /* Keys longer than one block are first hashed down to 32 bytes. */
        secp256k1_sha256 sha256;
        secp256k1_sha256_initialize(&sha256);
        secp256k1_sha256_write(&sha256, key, keylen);
        secp256k1_sha256_finalize(&sha256, rkey);
        memset(rkey + 32, 0, 32);
    }

    /* Outer hash consumes key XOR opad (0x5c). */
    secp256k1_sha256_initialize(&hash->outer);
    for (n = 0; n < sizeof(rkey); n++) {
        rkey[n] ^= 0x5c;
    }
    secp256k1_sha256_write(&hash->outer, rkey, sizeof(rkey));

    /* Inner hash consumes key XOR ipad (0x36); XORing with 0x5c ^ 0x36
     * simultaneously removes the opad applied above and applies ipad. */
    secp256k1_sha256_initialize(&hash->inner);
    for (n = 0; n < sizeof(rkey); n++) {
        rkey[n] ^= 0x5c ^ 0x36;
    }
    secp256k1_sha256_write(&hash->inner, rkey, sizeof(rkey));
    /* NOTE(review): a plain memset of key material can be elided by the
     * optimizer; consider explicit_bzero/memset_s where available. */
    memset(rkey, 0, sizeof(rkey));
}
+
/** Feed size bytes of message data into an ongoing HMAC computation
 *  (only the inner hash absorbs message data). */
static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256 *hash, const unsigned char *data, size_t size) {
    secp256k1_sha256_write(&hash->inner, data, size);
}
+
/** Finish the HMAC: out32 = SHA256(key^opad || SHA256(key^ipad || message)).
 *  Consumes both inner and outer contexts. */
static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256 *hash, unsigned char *out32) {
    unsigned char temp[32];
    secp256k1_sha256_finalize(&hash->inner, temp);
    secp256k1_sha256_write(&hash->outer, temp, 32);
    memset(temp, 0, 32); /* wipe the inner digest from the stack */
    secp256k1_sha256_finalize(&hash->outer, out32);
}
+
+
/** Seed an RFC 6979 HMAC-SHA256 DRBG with the given key material,
 *  following the K/V state setup of RFC 6979 section 3.2. */
static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
    secp256k1_hmac_sha256 hmac;
    static const unsigned char zero[1] = {0x00};
    static const unsigned char one[1] = {0x01};

    memset(rng->v, 0x01, 32); /* RFC6979 3.2.b. V = 0x01...01 */
    memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. K = 0x00...00 */

    /* RFC6979 3.2.d. K = HMAC_K(V || 0x00 || key), then V = HMAC_K(V) (3.2.e). */
    secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
    secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
    secp256k1_hmac_sha256_write(&hmac, zero, 1);
    secp256k1_hmac_sha256_finalize(&hmac, rng->k);
    secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
    secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
    secp256k1_hmac_sha256_finalize(&hmac, rng->v);

    /* RFC6979 3.2.f. K = HMAC_K(V || 0x01 || key), then V = HMAC_K(V) (3.2.g). */
    secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
    secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
    secp256k1_hmac_sha256_write(&hmac, one, 1);
    secp256k1_hmac_sha256_write(&hmac, key, keylen);
    secp256k1_hmac_sha256_finalize(&hmac, rng->k);
    secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
    secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
    secp256k1_hmac_sha256_finalize(&hmac, rng->v);
    rng->retry = 0; /* the first generate() call needs no retry re-keying */
}
+
+static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
+ /* RFC6979 3.2.h. */
+ static const unsigned char zero[1] = {0x00};
+ if (rng->retry) {
+ secp256k1_hmac_sha256 hmac;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, zero, 1);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ }
+
+ while (outlen > 0) {
+ secp256k1_hmac_sha256 hmac;
+ int now = outlen;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ if (now > 32) {
+ now = 32;
+ }
+ memcpy(out, rng->v, now);
+ out += now;
+ outlen -= now;
+ }
+
+ rng->retry = 1;
+}
+
/** Wipe the DRBG state (K and V derive from secret key material) and reset
 *  the retry flag so the structure holds no sensitive residue. */
static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 *rng) {
    /* NOTE(review): plain memset of secrets may be elided by the optimizer;
     * consider explicit_bzero/memset_s where available. */
    memset(rng->k, 0, 32);
    memset(rng->v, 0, 32);
    rng->retry = 0;
}
+
+#undef BE32
+#undef Round
+#undef sigma1
+#undef sigma0
+#undef Sigma1
+#undef Sigma0
+#undef Maj
+#undef Ch
+
+#endif /* SECP256K1_HASH_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
new file mode 100644
index 0000000..1c67802
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2013 Google Inc.
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import java.math.BigInteger;
+import com.google.common.base.Preconditions;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import static org.bitcoin.NativeSecp256k1Util.*;
+
+/**
+ * This class holds native methods to handle ECDSA verification.
+ *
+ * You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1
+ *
+ * To build secp256k1 for use with bitcoinj, run
+ * `./configure --enable-jni --enable-experimental --enable-module-ecdh`
+ * and `make` then copy `.libs/libsecp256k1.so` to your system library path
+ * or point the JVM to the folder containing it with -Djava.library.path
+ *
+ */
+public class NativeSecp256k1 {
+
+ private static final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
+ private static final Lock r = rwl.readLock();
+ private static final Lock w = rwl.writeLock();
+ private static ThreadLocal nativeECDSABuffer = new ThreadLocal();
+ /**
+ * Verifies the given secp256k1 signature in native code.
+ * Calling when enabled == false is undefined (probably library not loaded)
+ *
+ * @param data The data which was signed, must be exactly 32 bytes
+ * @param signature The signature
+ * @param pub The public key which did the signing
+ */
+ public static boolean verify(byte[] data, byte[] signature, byte[] pub) throws AssertFailException{
+ Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 520) {
+ byteBuff = ByteBuffer.allocateDirect(520);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(data);
+ byteBuff.put(signature);
+ byteBuff.put(pub);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1;
+ } finally {
+ r.unlock();
+ }
+ }
+
+ /**
+ * libsecp256k1 Create an ECDSA signature.
+ *
+ * @param data Message hash, 32 bytes
+ * @param key Secret key, 32 bytes
+ *
+ * Return values
+ * @param sig byte array of signature
+ */
+ public static byte[] sign(byte[] data, byte[] sec) throws AssertFailException{
+ Preconditions.checkArgument(data.length == 32 && sec.length <= 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 32 + 32) {
+ byteBuff = ByteBuffer.allocateDirect(32 + 32);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(data);
+ byteBuff.put(sec);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] sigArr = retByteArray[0];
+ int sigLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(sigArr.length, sigLen, "Got bad signature length.");
+
+ return retVal == 0 ? new byte[0] : sigArr;
+ }
+
+ /**
+ * libsecp256k1 Seckey Verify - returns 1 if valid, 0 if invalid
+ *
+ * @param seckey ECDSA Secret key, 32 bytes
+ */
+ public static boolean secKeyVerify(byte[] seckey) {
+ Preconditions.checkArgument(seckey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seckey.length) {
+ byteBuff = ByteBuffer.allocateDirect(seckey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+
+ r.lock();
+ try {
+ return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1;
+ } finally {
+ r.unlock();
+ }
+ }
+
+
+ /**
+ * libsecp256k1 Compute Pubkey - computes public key from secret key
+ *
+ * @param seckey ECDSA Secret key, 32 bytes
+ *
+ * Return values
+ * @param pubkey ECDSA Public key, 33 or 65 bytes
+ */
+ //TODO add a 'compressed' arg
+ public static byte[] computePubkey(byte[] seckey) throws AssertFailException{
+ Preconditions.checkArgument(seckey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seckey.length) {
+ byteBuff = ByteBuffer.allocateDirect(seckey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+ int pubLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ return retVal == 0 ? new byte[0]: pubArr;
+ }
+
+ /**
+ * libsecp256k1 Cleanup - This destroys the secp256k1 context object
+ * This should be called at the end of the program for proper cleanup of the context.
+ */
+ public static synchronized void cleanup() {
+ w.lock();
+ try {
+ secp256k1_destroy_context(Secp256k1Context.getContext());
+ } finally {
+ w.unlock();
+ }
+ }
+
+ public static long cloneContext() {
+ r.lock();
+ try {
+ return secp256k1_ctx_clone(Secp256k1Context.getContext());
+ } finally { r.unlock(); }
+ }
+
+ /**
+ * libsecp256k1 PrivKey Tweak-Mul - Tweak privkey by multiplying to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param seckey 32-byte seckey
+ */
+ public static byte[] privKeyTweakMul(byte[] privkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(privkey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(privkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] privArr = retByteArray[0];
+
+ int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(privArr.length, privLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return privArr;
+ }
+
+ /**
+ * libsecp256k1 PrivKey Tweak-Add - Tweak privkey by adding to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param seckey 32-byte seckey
+ */
+ public static byte[] privKeyTweakAdd(byte[] privkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(privkey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(privkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] privArr = retByteArray[0];
+
+ int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(privArr.length, privLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return privArr;
+ }
+
+ /**
+ * libsecp256k1 PubKey Tweak-Add - Tweak pubkey by adding to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param pubkey 32-byte seckey
+ */
+ public static byte[] pubKeyTweakAdd(byte[] pubkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(pubkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+
+ int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return pubArr;
+ }
+
+ /**
+ * libsecp256k1 PubKey Tweak-Mul - Tweak pubkey by multiplying to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param pubkey 32-byte seckey
+ */
+ public static byte[] pubKeyTweakMul(byte[] pubkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(pubkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+
+ int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return pubArr;
+ }
+
+ /**
+ * libsecp256k1 create ECDH secret - constant time ECDH calculation
+ *
+ * @param seckey byte array of secret key used in exponentiaion
+ * @param pubkey byte array of public key used in exponentiaion
+ */
+ public static byte[] createECDHSecret(byte[] seckey, byte[] pubkey) throws AssertFailException{
+ Preconditions.checkArgument(seckey.length <= 32 && pubkey.length <= 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 32 + pubkey.length) {
+ byteBuff = ByteBuffer.allocateDirect(32 + pubkey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+ byteBuff.put(pubkey);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] resArr = retByteArray[0];
+ int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+
+ assertEquals(resArr.length, 32, "Got bad result length.");
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return resArr;
+ }
+
+ /**
+ * libsecp256k1 randomize - updates the context randomization
+ *
+ * @param seed 32-byte random seed
+ */
+ public static synchronized boolean randomize(byte[] seed) throws AssertFailException{
+ Preconditions.checkArgument(seed.length == 32 || seed == null);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seed.length) {
+ byteBuff = ByteBuffer.allocateDirect(seed.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seed);
+
+ w.lock();
+ try {
+ return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1;
+ } finally {
+ w.unlock();
+ }
+ }
+
+ private static native long secp256k1_ctx_clone(long context);
+
+ private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen);
+
+ private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen);
+
+ private static native void secp256k1_destroy_context(long context);
+
+ private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen);
+
+ private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context);
+
+ private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen);
+
+ private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen);
+
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
new file mode 100644
index 0000000..d766a10
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
@@ -0,0 +1,226 @@
+package org.bitcoin;
+
+import com.google.common.io.BaseEncoding;
+import java.util.Arrays;
+import java.math.BigInteger;
+import javax.xml.bind.DatatypeConverter;
+import static org.bitcoin.NativeSecp256k1Util.*;
+
+/**
+ * This class holds test cases defined for testing this library.
+ */
+public class NativeSecp256k1Test {
+
+ //TODO improve comments/add more tests
+ /**
+ * This tests verify() for a valid signature
+ */
+ public static void testVerifyPos() throws AssertFailException{
+ boolean result = false;
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ result = NativeSecp256k1.verify( data, sig, pub);
+ assertEquals( result, true , "testVerifyPos");
+ }
+
+ /**
+ * This tests verify() for a non-valid signature
+ */
+ public static void testVerifyNeg() throws AssertFailException{
+ boolean result = false;
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A91".toLowerCase()); //sha256hash of "testing"
+ byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ result = NativeSecp256k1.verify( data, sig, pub);
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, false , "testVerifyNeg");
+ }
+
+ /**
+ * This tests secret key verify() for a valid secretkey
+ */
+ public static void testSecKeyVerifyPos() throws AssertFailException{
+ boolean result = false;
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ result = NativeSecp256k1.secKeyVerify( sec );
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, true , "testSecKeyVerifyPos");
+ }
+
+ /**
+ * This tests secret key verify() for an invalid secretkey
+ */
+ public static void testSecKeyVerifyNeg() throws AssertFailException{
+ boolean result = false;
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ result = NativeSecp256k1.secKeyVerify( sec );
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, false , "testSecKeyVerifyNeg");
+ }
+
+ /**
+ * This tests public key create() for a valid secretkey
+ */
+ public static void testPubKeyCreatePos() throws AssertFailException{
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.computePubkey( sec);
+ String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( pubkeyString , "04C591A8FF19AC9C4E4E5793673B83123437E975285E7B442F4EE2654DFFCA5E2D2103ED494718C697AC9AEBCFD19612E224DB46661011863ED2FC54E71861E2A6" , "testPubKeyCreatePos");
+ }
+
+ /**
+ * This tests public key create() for a invalid secretkey
+ */
+ public static void testPubKeyCreateNeg() throws AssertFailException{
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.computePubkey( sec);
+ String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( pubkeyString, "" , "testPubKeyCreateNeg");
+ }
+
+ /**
+ * This tests sign() for a valid secretkey
+ */
+ public static void testSignPos() throws AssertFailException{
+
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.sign(data, sec);
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString, "30440220182A108E1448DC8F1FB467D06A0F3BB8EA0533584CB954EF8DA112F1D60E39A202201C66F36DA211C087F3AF88B50EDF4F9BDAA6CF5FD6817E74DCA34DB12390C6E9" , "testSignPos");
+ }
+
+ /**
+ * This tests sign() for a invalid secretkey
+ */
+ public static void testSignNeg() throws AssertFailException{
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.sign(data, sec);
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString, "" , "testSignNeg");
+ }
+
+ /**
+ * This tests private key tweak-add
+ */
+ public static void testPrivKeyTweakAdd_1() throws AssertFailException {
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.privKeyTweakAdd( sec , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "A168571E189E6F9A7E2D657A4B53AE99B909F7E712D1C23CED28093CD57C88F3" , "testPrivKeyAdd_1");
+ }
+
+ /**
+ * This tests private key tweak-mul
+ */
+ public static void testPrivKeyTweakMul_1() throws AssertFailException {
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.privKeyTweakMul( sec , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "97F8184235F101550F3C71C927507651BD3F1CDB4A5A33B8986ACF0DEE20FFFC" , "testPrivKeyMul_1");
+ }
+
+ /**
+ * This tests private key tweak-add uncompressed
+ */
+ public static void testPrivKeyTweakAdd_2() throws AssertFailException {
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.pubKeyTweakAdd( pub , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "0411C6790F4B663CCE607BAAE08C43557EDC1A4D11D88DFCB3D841D0C6A941AF525A268E2A863C148555C48FB5FBA368E88718A46E205FABC3DBA2CCFFAB0796EF" , "testPrivKeyAdd_2");
+ }
+
+ /**
+ * This tests private key tweak-mul uncompressed
+ */
+ public static void testPrivKeyTweakMul_2() throws AssertFailException {
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.pubKeyTweakMul( pub , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "04E0FE6FE55EBCA626B98A807F6CAF654139E14E5E3698F01A9A658E21DC1D2791EC060D4F412A794D5370F672BC94B722640B5F76914151CFCA6E712CA48CC589" , "testPrivKeyMul_2");
+ }
+
+ /**
+ * This tests seed randomization
+ */
+ public static void testRandomize() throws AssertFailException {
+ byte[] seed = BaseEncoding.base16().lowerCase().decode("A441B15FE9A3CF56661190A0B93B9DEC7D04127288CC87250967CF3B52894D11".toLowerCase()); //sha256hash of "random"
+ boolean result = NativeSecp256k1.randomize(seed);
+ assertEquals( result, true, "testRandomize");
+ }
+
+ public static void testCreateECDHSecret() throws AssertFailException{
+
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.createECDHSecret(sec, pub);
+ String ecdhString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( ecdhString, "2A2A67007A926E6594AF3EB564FC74005B37A9C8AEF2033C4552051B5C87F043" , "testCreateECDHSecret");
+ }
+
+ public static void main(String[] args) throws AssertFailException{
+
+
+ System.out.println("\n libsecp256k1 enabled: " + Secp256k1Context.isEnabled() + "\n");
+
+ assertEquals( Secp256k1Context.isEnabled(), true, "isEnabled" );
+
+ //Test verify() success/fail
+ testVerifyPos();
+ testVerifyNeg();
+
+ //Test secKeyVerify() success/fail
+ testSecKeyVerifyPos();
+ testSecKeyVerifyNeg();
+
+ //Test computePubkey() success/fail
+ testPubKeyCreatePos();
+ testPubKeyCreateNeg();
+
+ //Test sign() success/fail
+ testSignPos();
+ testSignNeg();
+
+ //Test privKeyTweakAdd() 1
+ testPrivKeyTweakAdd_1();
+
+ //Test privKeyTweakMul() 2
+ testPrivKeyTweakMul_1();
+
+ //Test privKeyTweakAdd() 3
+ testPrivKeyTweakAdd_2();
+
+ //Test privKeyTweakMul() 4
+ testPrivKeyTweakMul_2();
+
+ //Test randomize()
+ testRandomize();
+
+ //Test ECDH
+ testCreateECDHSecret();
+
+ NativeSecp256k1.cleanup();
+
+ System.out.println(" All tests passed." );
+
+ }
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java
new file mode 100644
index 0000000..04732ba
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+public class NativeSecp256k1Util{
+
+ public static void assertEquals( int val, int val2, String message ) throws AssertFailException{
+ if( val != val2 )
+ throw new AssertFailException("FAIL: " + message);
+ }
+
+ public static void assertEquals( boolean val, boolean val2, String message ) throws AssertFailException{
+ if( val != val2 )
+ throw new AssertFailException("FAIL: " + message);
+ else
+ System.out.println("PASS: " + message);
+ }
+
+ public static void assertEquals( String val, String val2, String message ) throws AssertFailException{
+ if( !val.equals(val2) )
+ throw new AssertFailException("FAIL: " + message);
+ else
+ System.out.println("PASS: " + message);
+ }
+
+ public static class AssertFailException extends Exception {
+ public AssertFailException(String message) {
+ super( message );
+ }
+ }
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java
new file mode 100644
index 0000000..216c986
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+/**
+ * This class holds the context reference used in native methods
+ * to handle ECDSA operations.
+ */
+public class Secp256k1Context {
+ private static final boolean enabled; //true if the library is loaded
+ private static final long context; //ref to pointer to context obj
+
+ static { //static initializer
+ boolean isEnabled = true;
+ long contextRef = -1;
+ try {
+ System.loadLibrary("secp256k1");
+ contextRef = secp256k1_init_context();
+ } catch (UnsatisfiedLinkError e) {
+ System.out.println("UnsatisfiedLinkError: " + e.toString());
+ isEnabled = false;
+ }
+ enabled = isEnabled;
+ context = contextRef;
+ }
+
+ public static boolean isEnabled() {
+ return enabled;
+ }
+
+ public static long getContext() {
+ if(!enabled) return -1; //sanity check
+ return context;
+ }
+
+ private static native long secp256k1_init_context();
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
new file mode 100644
index 0000000..bcef7b3
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
@@ -0,0 +1,377 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include "org_bitcoin_NativeSecp256k1.h"
+#include "include/secp256k1.h"
+#include "include/secp256k1_ecdh.h"
+#include "include/secp256k1_recovery.h"
+
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
+ (JNIEnv* env, jclass classObject, jlong ctx_l)
+{
+ const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx);
+
+ (void)classObject;(void)env;
+
+ return ctx_clone_l;
+
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ (void)classObject;
+
+ return secp256k1_context_randomize(ctx, seed);
+
+}
+
+SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
+ (JNIEnv* env, jclass classObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ secp256k1_context_destroy(ctx);
+
+ (void)classObject;(void)env;
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* sigdata = { (unsigned char*) (data + 32) };
+ const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) };
+
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pubkey;
+
+ int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen);
+
+ if( ret ) {
+ ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
+
+ if( ret ) {
+ ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey);
+ }
+ }
+
+ (void)classObject;
+
+ return ret;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ unsigned char* secKey = (unsigned char*) (data + 32);
+
+ jobjectArray retArray;
+ jbyteArray sigArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ secp256k1_ecdsa_signature sig[72];
+
+ int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL );
+
+ unsigned char outputSer[72];
+ size_t outputLen = 72;
+
+ if( ret ) {
+ int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ sigArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, sigArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, sigArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ (void)classObject;
+
+ return secp256k1_ec_seckey_verify(ctx, secKey);
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ secp256k1_pubkey pubkey;
+
+ jobjectArray retArray;
+ jbyteArray pubkeyArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey);
+
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubkeyArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubkeyArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubkeyArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (privkey + 32);
+
+ jobjectArray retArray;
+ jbyteArray privArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int privkeylen = 32;
+
+ int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak);
+
+ intsarray[0] = privkeylen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ privArray = (*env)->NewByteArray(env, privkeylen);
+ (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey);
+ (*env)->SetObjectArrayElement(env, retArray, 0, privArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (privkey + 32);
+
+ jobjectArray retArray;
+ jbyteArray privArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int privkeylen = 32;
+
+ int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak);
+
+ intsarray[0] = privkeylen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ privArray = (*env)->NewByteArray(env, privkeylen);
+ (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey);
+ (*env)->SetObjectArrayElement(env, retArray, 0, privArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/
+ unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (pkey + publen);
+
+ jobjectArray retArray;
+ jbyteArray pubArray, intsByteArray;
+ unsigned char intsarray[2];
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ secp256k1_pubkey pubkey;
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
+
+ if( ret ) {
+ ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak);
+ }
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (pkey + publen);
+
+ jobjectArray retArray;
+ jbyteArray pubArray, intsByteArray;
+ unsigned char intsarray[2];
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ secp256k1_pubkey pubkey;
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
+
+ if ( ret ) {
+ ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak);
+ }
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine
+ (JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys)
+{
+ (void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys;
+
+ return 0;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* pubdata = (const unsigned char*) (secdata + 32);
+
+ jobjectArray retArray;
+ jbyteArray outArray, intsByteArray;
+ unsigned char intsarray[1];
+ secp256k1_pubkey pubkey;
+ unsigned char nonce_res[32];
+ size_t outputLen = 32;
+
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
+
+ if (ret) {
+ ret = secp256k1_ecdh(
+ ctx,
+ nonce_res,
+ &pubkey,
+ secdata
+ );
+ }
+
+ intsarray[0] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ outArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, outArray, 0, 32, (jbyte*)nonce_res);
+ (*env)->SetObjectArrayElement(env, retArray, 0, outArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 1);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
new file mode 100644
index 0000000..fe613c9
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
@@ -0,0 +1,119 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+#include "include/secp256k1.h"
+/* Header for class org_bitcoin_NativeSecp256k1 */
+
+#ifndef _Included_org_bitcoin_NativeSecp256k1
+#define _Included_org_bitcoin_NativeSecp256k1
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ctx_clone
+ * Signature: (J)J
+ */
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
+ (JNIEnv *, jclass, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_context_randomize
+ * Signature: (Ljava/nio/ByteBuffer;J)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_privkey_tweak_add
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_privkey_tweak_mul
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_pubkey_tweak_add
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_pubkey_tweak_mul
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_destroy_context
+ * Signature: (J)V
+ */
+SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
+ (JNIEnv *, jclass, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdsa_verify
+ * Signature: (Ljava/nio/ByteBuffer;JII)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv *, jclass, jobject, jlong, jint, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdsa_sign
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_seckey_verify
+ * Signature: (Ljava/nio/ByteBuffer;J)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_pubkey_create
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_pubkey_parse
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdh
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c
new file mode 100644
index 0000000..a52939e
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c
@@ -0,0 +1,15 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include "org_bitcoin_Secp256k1Context.h"
+#include "include/secp256k1.h"
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
+ (JNIEnv* env, jclass classObject)
+{
+ secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ (void)classObject;(void)env;
+
+ return (uintptr_t)ctx;
+}
+
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h
new file mode 100644
index 0000000..0d2bc84
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h
@@ -0,0 +1,22 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+#include "include/secp256k1.h"
+/* Header for class org_bitcoin_Secp256k1Context */
+
+#ifndef _Included_org_bitcoin_Secp256k1Context
+#define _Included_org_bitcoin_Secp256k1Context
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_bitcoin_Secp256k1Context
+ * Method: secp256k1_init_context
+ * Signature: ()J
+ */
+SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.inner_product_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.inner_product_impl.h.swp
new file mode 100644
index 0000000..484ada5
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.inner_product_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.main_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.main_impl.h.swp
new file mode 100644
index 0000000..eb94198
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.main_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swo b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swo
new file mode 100644
index 0000000..ab4e184
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swo differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swp
new file mode 100644
index 0000000..9c4b840
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.rangeproof_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.tests_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.tests_impl.h.swp
new file mode 100644
index 0000000..9b7c17d
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.tests_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.util.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.util.h.swp
new file mode 100644
index 0000000..00dbd67
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/.util.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/Makefile.am.include
new file mode 100644
index 0000000..1bee88e
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/Makefile.am.include
@@ -0,0 +1,12 @@
+include_HEADERS += include/secp256k1_bulletproofs.h
+noinst_HEADERS += src/modules/bulletproofs/inner_product_impl.h
+noinst_HEADERS += src/modules/bulletproofs/rangeproof_impl.h
+noinst_HEADERS += src/modules/bulletproofs/main_impl.h
+noinst_HEADERS += src/modules/bulletproofs/tests_impl.h
+noinst_HEADERS += src/modules/bulletproofs/util.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_bulletproof
+bench_bulletproof_SOURCES = src/bench_bulletproof.c
+bench_bulletproof_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_bulletproof_LDFLAGS = -static
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/inner_product_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/inner_product_impl.h
new file mode 100644
index 0000000..f8269e3
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/inner_product_impl.h
@@ -0,0 +1,848 @@
+/**********************************************************************
+ * Copyright (c) 2018 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
+#define SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
+
+#include "group.h"
+#include "scalar.h"
+
+#include "modules/bulletproofs/main_impl.h"
+#include "modules/bulletproofs/util.h"
+
+#define POPCOUNT(x) (__builtin_popcountl((unsigned long)(x))) /* TODO make these portable */
+#define CTZ(x) (__builtin_ctzl((unsigned long)(x)))
+
+/* Number of scalars that should remain at the end of a recursive proof. The paper
+ * uses 2, by reducing the scalars as far as possible. We stop one recursive step
+ * early, trading two points (L, R) for two scalars, which reduces verification
+ * and prover cost.
+ *
+ * For the most part, all comments assume this value is at 4.
+ */
+#define IP_AB_SCALARS 4
+
+/* Bulletproof inner products consist of the four scalars and `2[log2(n) - 1]` points
+ * `a_1`, `a_2`, `b_1`, `b_2`, `L_i` and `R_i`, where `i` ranges from 0 to `log2(n)-1`.
+ *
+ * The prover takes as input a point `P` and scalar `c`. It proves that there exist
+ * scalars `a_i`, `b_i` for `i` ranging from 0 to `n-1`, such that
+ * `P = sum_i [a_i G_i + b_i H_i]` and `<{a_i}, {b_i}> = c`.
+ * where `G_i` and `H_i` are standard NUMS generators.
+ *
+ * Verification of the proof comes down to a single multiexponentiation of the form
+ *
+ * P + (c - a_1*b_1 - a_2*b_2)*x*G
+ * - sum_{i=1}^n [s'_i*G_i + s_i*H_i]
+ * + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i]
+ *
+ * which will equal infinity if the inner product proof is correct. Here
+ * - `G` is the standard secp generator
+ * - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper.
+ * - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and
+ * must be a commitment to `P` and `c`.
+ * - `s_i` and `s'_i` are computed as follows.
+ *
+ * Letting `i_j` be defined as 1 if `i & 2^j == 1`, and -1 otherwise,
+ * - For `i` from `1` to `n/2`, `s'_i = a_1 * prod_{j=1}^log2(n) x_j^i_j`
+ * - For `i` from `n/2 + 1` to `n`, `s'_i = a_2 * prod_{j=1}^log2(n) x_j^i_j`
+ * - For `i` from `1` to `n/2`, `s_i = b_1 * prod_{j=1}^log2(n) x_j^-i_j`
+ * - For `i` from `n/2 + 1` to `n`, `s_i = b_2 * prod_{j=1}^log2(n) x_j^-i_j`
+ *
+ * Observe that these can be computed iteratively by labelling the coefficients `s_i` for `i`
+ * from `0` to `2n-1` rather than 1-indexing and distinguishing between `s_i'`s and `s_i`s:
+ *
+ * Start with `s_0 = a_1 * prod_{j=1}^log2(n) x_j^-1`, then for later `s_i`s,
+ * - For `i` from `1` to `n/2 - 1`, multiply some earlier `s'_j` by some `x_k^2`
+ * - For `i = n/2`, multiply `s_{i-1} by `a_2/a_1`.
+ * - For `i` from `n/2 + 1` to `n - 1`, multiply some earlier `s'_j` by some `x_k^2`
+ * - For `i = n`, multiply `s'_{i-1}` by `b_1/a_2` to get `s_i`.
+ * - For `i` from `n + 1` to `3n/2 - 1`, multiply some earlier `s_j` by some `x_k^-2`
+ * - For `i = 3n/2`, multiply `s_{i-1}` by `b_2/b_1`.
+ * - For `i` from `3n/2 + 1` to `2n - 1`, multiply some earlier `s_j` by some `x_k^-2`
+ * where of course, the indices `j` and `k` must be chosen carefully.
+ *
+ * The bulk of `secp256k1_bulletproof_innerproduct_vfy_ecmult_callback` involves computing
+ * these indices, given `a_2/a_1`, `b_1/a_1`, `b_2/b_1`, and the `x_k^2`s as input. It
+ * computes `x_k^-2` as a side-effect of its other computation.
+ */
+
+typedef int (secp256k1_bulletproof_vfy_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data);
+
+/* used by callers to wrap a proof with surrounding context */
+typedef struct {
+ const unsigned char *proof;
+ secp256k1_scalar p_offs;
+ secp256k1_scalar yinv;
+ unsigned char commit[32];
+ secp256k1_bulletproof_vfy_callback *rangeproof_cb;
+ void *rangeproof_cb_data;
+ size_t n_extra_rangeproof_points;
+} secp256k1_bulletproof_innerproduct_context;
+
+/* used internally */
+typedef struct {
+ const secp256k1_bulletproof_innerproduct_context *proof;
+ secp256k1_scalar abinv[IP_AB_SCALARS];
+ secp256k1_scalar xsq[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
+ secp256k1_scalar xsqinv[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
+ secp256k1_scalar xsqinvy[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
+ secp256k1_scalar xcache[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
+ secp256k1_scalar xsqinv_mask;
+ const unsigned char *serialized_lr;
+} secp256k1_bulletproof_innerproduct_vfy_data;
+
+/* used by callers to modify the multiexp */
+typedef struct {
+ size_t n_proofs;
+ secp256k1_scalar p_offs;
+ const secp256k1_ge *g;
+ const secp256k1_ge *geng;
+ const secp256k1_ge *genh;
+ size_t vec_len;
+ size_t lg_vec_len;
+ int shared_g;
+ secp256k1_scalar *randomizer;
+ secp256k1_bulletproof_innerproduct_vfy_data *proof;
+} secp256k1_bulletproof_innerproduct_vfy_ecmult_context;
+
+size_t secp256k1_bulletproof_innerproduct_proof_length(size_t n) {
+ if (n < IP_AB_SCALARS / 2) {
+ return 32 * (1 + 2 * n);
+ } else {
+ size_t bit_count = POPCOUNT(n);
+ size_t log = secp256k1_floor_lg(2 * n / IP_AB_SCALARS);
+ return 32 * (1 + 2 * (bit_count - 1 + log) + IP_AB_SCALARS) + (2*log + 7) / 8;
+ }
+}
+
+/* Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other
+ * (scalar, point) pair it calls the following callback function, which takes an index and outputs a
+ * pair. The function therefore has three regimes:
+ *
+ * For the first `n` invocations, it returns `(s'_i, G_i)` for `i` from 1 to `n`.
+ * For the next `n` invocations, it returns `(s_i, H_i)` for `i` from 1 to `n`.
+ * For the next `2*log2(n)` invocations it returns `(x_i^-2, L_i)` and `(x_i^2, R_i)`,
+ * alternating between the two choices, for `i` from 1 to `log2(n)`.
+ *
+ * For the remaining invocations it passes through to another callback, `rangeproof_cb_data` which
+ * computes `P`. The reason for this is that in practice `P` is usually defined by another multiexp
+ * rather than being a known point, and it is more efficient to compute one exponentiation.
+ *
+ * Inline we refer to the first `2n` coefficients as `s_i` for `i` from 0 to `2n-1`, since that
+ * is the more convenient indexing. In particular we describe (a) how the indices `j` and `k`,
+ * from the big comment block above, are chosen; and (b) when/how each `x_k^-2` is computed.
+ */
+static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_innerproduct_vfy_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_vfy_ecmult_context *) data;
+
+ /* First 2N points use the standard Gi, Hi generators, and the scalars can be aggregated across proofs.
+ * Inside this if clause, `idx` corresponds to the index `i` in the big comment, and runs from 0 to `2n-1`.
+ * Also `ctx->vec_len` corresponds to `n`. */
+ if (idx < 2 * ctx->vec_len) {
+ /* Number of `a` scalars in the proof (same as number of `b` scalars in the proof). Will
+ * be 2 except for very small proofs that have fewer than 2 scalars as input. */
+ const size_t grouping = ctx->vec_len < IP_AB_SCALARS / 2 ? ctx->vec_len : IP_AB_SCALARS / 2;
+ const size_t lg_grouping = secp256k1_floor_lg(grouping);
+ size_t i;
+ VERIFY_CHECK(lg_grouping == 0 || lg_grouping == 1); /* TODO support higher IP_AB_SCALARS */
+
+ /* Determine whether we're multiplying by `G_i`s or `H_i`s. */
+ if (idx < ctx->vec_len) {
+ *pt = ctx->geng[idx];
+ } else {
+ *pt = ctx->genh[idx - ctx->vec_len];
+ }
+
+ secp256k1_scalar_clear(sc);
+ /* Loop over all the different inner product proofs we might be doing at once. Since they
+ * share generators `G_i` and `H_i`, we compute all of their scalars at once and add them.
+ * For each proof we start with the "seed value" `ctx->proof[i].xcache[0]` (see next comment
+ * for its meaning) from which every other scalar derived. We expect the caller to have
+ * randomized this to ensure that this wanton addition cannot enable cancellation attacks.
+ */
+ for (i = 0; i < ctx->n_proofs; i++) {
+ /* To recall from the introductory comment: most `s_i` values are computed by taking an
+ * earlier `s_j` value and multiplying it by some `x_k^2`.
+ *
+ * We now explain the index `j`: it is the largest number with one fewer 1-bits than `i`.
+ * Alternately, the most recently returned `s_j` where `j` has one fewer 1-bits than `i`.
+ *
+ * To ensure that `s_j` is available when we need it, on each iteration we define the
+ * variable `cache_idx` which simply counts the 1-bits in `i`; before returning `s_i`
+ * we store it in `ctx->proof[i].xcache[cache_idx]`. Then later, when we want "most
+ * recently returned `s_j` with one fewer 1-bits than `i`, it'll be sitting right
+ * there in `ctx->proof[i].xcache[cache_idx - 1]`.
+ *
+ * Note that `ctx->proof[i].xcache[0]` will always equal `-a_1 * prod_{i=1}^{n-1} x_i^-2`,
+ * and we expect the caller to have set this.
+ */
+ const size_t cache_idx = POPCOUNT(idx);
+ secp256k1_scalar term;
+ VERIFY_CHECK(cache_idx < SECP256K1_BULLETPROOF_MAX_DEPTH);
+ /* For the special case `cache_idx == 0` (which is true iff `idx == 0`) there is nothing to do. */
+ if (cache_idx > 0) {
+ /* Otherwise, check if this is one of the special indices where we transition from `a_1` to `a_2`,
+ * from `a_2` to `b_1`, or from `b_1` to `b_2`. (For small proofs there is only one transition,
+ * from `a` to `b`.) */
+ if (idx % (ctx->vec_len / grouping) == 0) {
+ const size_t abinv_idx = idx / (ctx->vec_len / grouping) - 1;
+ size_t prev_cache_idx;
+ /* Check if it's the even specialer index where we're transitioning from `a`s to `b`s, from
+ * `G`s to `H`s, and from `x_k^2`s to `x_k^-2`s. In rangeproof and circuit applications,
+ * the caller secretly has a variable `y` such that `H_i` is really `y^-i H_i` for `i` ranging
+ * from 0 to `n-1`. Rather than forcing the caller to tweak every `H_i` herself, which would
+ * be very slow and prevent precomputation, we instead multiply our cached `x_k^-2` values
+ * by `y^(-2^k)` respectively, which will ultimately result in every `s_i` we return having
+ * been multiplied by `y^-i`.
+ *
+ * This is an underhanded trick but the result is that all `n` powers of `y^-i` show up
+ * in the right place, and we only need log-many scalar squarings and multiplications.
+ */
+ if (idx == ctx->vec_len) {
+ secp256k1_scalar yinvn = ctx->proof[i].proof->yinv;
+ size_t j;
+ prev_cache_idx = POPCOUNT(idx - 1);
+ for (j = 0; j < (size_t) CTZ(idx) - lg_grouping; j++) {
+ secp256k1_scalar_mul(&ctx->proof[i].xsqinvy[j], &ctx->proof[i].xsqinv[j], &yinvn);
+ secp256k1_scalar_sqr(&yinvn, &yinvn);
+ }
+ if (lg_grouping == 1) {
+ secp256k1_scalar_mul(&ctx->proof[i].abinv[2], &ctx->proof[i].abinv[2], &yinvn);
+ secp256k1_scalar_sqr(&yinvn, &yinvn);
+ }
+ } else {
+ prev_cache_idx = cache_idx - 1;
+ }
+ /* Regardless of specialness, we multiply by `a_2/a_1` or whatever the appropriate multiplier
+ * is. We expect the caller to have given these to us in the `ctx->proof[i].abinv` array. */
+ secp256k1_scalar_mul(
+ &ctx->proof[i].xcache[cache_idx],
+ &ctx->proof[i].xcache[prev_cache_idx],
+ &ctx->proof[i].abinv[abinv_idx]
+ );
+ /* If it's *not* a special index, just multiply by the appropriate `x_k^2`, or `x_k^-2` in case
+ * we're in the `H_i` half of the multiexp. At this point we can explain the index `k`, which
+ * is computed in the variable `xsq_idx` (`xsqinv_idx` respectively). In light of our discussion
+ * of `j`, we see that this should be "the least significant bit that's 1 in `i` but not `i-1`."
+ * In other words, it is the number of trailing 0 bits in the index `i`. */
+ } else if (idx < ctx->vec_len) {
+ const size_t xsq_idx = CTZ(idx);
+ secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsq[xsq_idx]);
+ } else {
+ const size_t xsqinv_idx = CTZ(idx);
+ secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsqinvy[xsqinv_idx]);
+ }
+ }
+ term = ctx->proof[i].xcache[cache_idx];
+
+ /* One last trick: compute `x_k^-2` while computing the `G_i` scalars, so that they'll be
+ * available when we need them for the `H_i` scalars. We can do this for every `i` value
+ * that has exactly one 0-bit, i.e. which is a product of all `x_i`s and one `x_k^-1`. By
+ * multiplying that by the special value `prod_{i=1}^n x_i^-1` we obtain simply `x_k^-2`.
+ * We expect the caller to give us this special value in `ctx->proof[i].xsqinv_mask`. */
+ if (idx < ctx->vec_len / grouping && POPCOUNT(idx) == ctx->lg_vec_len - 1) {
+ const size_t xsqinv_idx = CTZ(~idx);
+ secp256k1_scalar_mul(&ctx->proof[i].xsqinv[xsqinv_idx], &ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xsqinv_mask);
+ }
+
+ /* Finally, if the caller, in its computation of `P`, wants to multiply `G_i` or `H_i` by some scalar,
+ * we add that to our sum as well. Again, we trust the randomization in `xcache[0]` to prevent any
+ * cancellation attacks here. */
+ if (ctx->proof[i].proof->rangeproof_cb != NULL) {
+ secp256k1_scalar rangeproof_offset;
+ if ((ctx->proof[i].proof->rangeproof_cb)(&rangeproof_offset, NULL, &ctx->randomizer[i], idx, ctx->proof[i].proof->rangeproof_cb_data) == 0) {
+ return 0;
+ }
+ secp256k1_scalar_add(&term, &term, &rangeproof_offset);
+ }
+
+ secp256k1_scalar_add(sc, sc, &term);
+ }
+ /* Next 2lgN points are the L and R vectors */
+ } else if (idx < 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
+ size_t real_idx = idx - 2 * ctx->vec_len;
+ const size_t proof_idx = real_idx / (2 * ctx->lg_vec_len);
+ real_idx = real_idx % (2 * ctx->lg_vec_len);
+ secp256k1_bulletproof_deserialize_point(
+ pt,
+ ctx->proof[proof_idx].serialized_lr,
+ real_idx,
+ 2 * ctx->lg_vec_len
+ );
+ if (idx % 2 == 0) {
+ *sc = ctx->proof[proof_idx].xsq[real_idx / 2];
+ } else {
+ *sc = ctx->proof[proof_idx].xsqinv[real_idx / 2];
+ }
+ secp256k1_scalar_mul(sc, sc, &ctx->randomizer[proof_idx]);
+ /* After the G's, H's, L's and R's, do the blinding_gen */
+ } else if (idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
+ *sc = ctx->p_offs;
+ *pt = *ctx->g;
+ /* Remaining points are whatever the rangeproof wants */
+ } else if (ctx->shared_g && idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) + 1) {
+ /* Special case: the first extra point is independent of the proof, for both rangeproof and circuit */
+ size_t i;
+ secp256k1_scalar_clear(sc);
+ for (i = 0; i < ctx->n_proofs; i++) {
+ secp256k1_scalar term;
+ if ((ctx->proof[i].proof->rangeproof_cb)(&term, pt, &ctx->randomizer[i], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[i].proof->rangeproof_cb_data) == 0) {
+ return 0;
+ }
+ secp256k1_scalar_add(sc, sc, &term);
+ }
+ } else {
+ size_t proof_idx = 0;
+ size_t real_idx = idx - 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) - 1 - !!ctx->shared_g;
+ while (real_idx >= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g) {
+ real_idx -= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g;
+ proof_idx++;
+ VERIFY_CHECK(proof_idx < ctx->n_proofs);
+ }
+ if ((ctx->proof[proof_idx].proof->rangeproof_cb)(sc, pt, &ctx->randomizer[proof_idx], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[proof_idx].proof->rangeproof_cb_data) == 0) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* nb For security it is essential that `commit_inp` already commit to all data
+ * needed to compute `P`. We do not hash it in during verification since `P`
+ * may be specified indirectly as a bunch of scalar offsets.
+ */
+static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const secp256k1_bulletproof_generators *gens, size_t vec_len, const secp256k1_bulletproof_innerproduct_context *proof, size_t n_proofs, size_t plen, int shared_g) {
+ secp256k1_sha256 sha256;
+ secp256k1_bulletproof_innerproduct_vfy_ecmult_context ecmult_data;
+ unsigned char commit[32];
+ size_t total_n_points = 2 * vec_len + !!shared_g + 1; /* +1 for shared G (value_gen), +1 for H (blinding_gen) */
+ secp256k1_gej r;
+ secp256k1_scalar zero;
+ size_t i;
+
+ if (plen != secp256k1_bulletproof_innerproduct_proof_length(vec_len)) {
+ return 0;
+ }
+
+ if (n_proofs == 0) {
+ return 1;
+ }
+
+ if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data.randomizer) + sizeof(*ecmult_data.proof)), 2)) {
+ return 0;
+ }
+
+ secp256k1_scalar_clear(&zero);
+ ecmult_data.n_proofs = n_proofs;
+ ecmult_data.g = gens->blinding_gen;
+ ecmult_data.geng = gens->gens;
+ ecmult_data.genh = gens->gens + gens->n / 2;
+ ecmult_data.vec_len = vec_len;
+ ecmult_data.lg_vec_len = secp256k1_floor_lg(2 * vec_len / IP_AB_SCALARS);
+ ecmult_data.shared_g = shared_g;
+ ecmult_data.randomizer = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.randomizer));
+ ecmult_data.proof = (secp256k1_bulletproof_innerproduct_vfy_data *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.proof));
+ /* Seed RNG for per-proof randomizers */
+ secp256k1_sha256_initialize(&sha256);
+ for (i = 0; i < n_proofs; i++) {
+ secp256k1_sha256_write(&sha256, proof[i].proof, plen);
+ secp256k1_sha256_write(&sha256, proof[i].commit, 32);
+ secp256k1_scalar_get_b32(commit, &proof[i].p_offs);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ }
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ secp256k1_scalar_clear(&ecmult_data.p_offs);
+ for (i = 0; i < n_proofs; i++) {
+ const unsigned char *serproof = proof[i].proof;
+ unsigned char proof_commit[32];
+ secp256k1_scalar dot;
+ secp256k1_scalar ab[IP_AB_SCALARS];
+ secp256k1_scalar negprod;
+ secp256k1_scalar x;
+ int overflow;
+ size_t j;
+ const size_t n_ab = 2 * vec_len < IP_AB_SCALARS ? 2 * vec_len : IP_AB_SCALARS;
+
+ total_n_points += 2 * ecmult_data.lg_vec_len + proof[i].n_extra_rangeproof_points - !!shared_g; /* -1 for shared G */
+
+ /* Extract dot product, will always be the first 32 bytes */
+ secp256k1_scalar_set_b32(&dot, serproof, &overflow);
+ if (overflow) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ /* Commit to dot product */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, proof[i].commit, 32);
+ secp256k1_sha256_write(&sha256, serproof, 32);
+ secp256k1_sha256_finalize(&sha256, proof_commit);
+ serproof += 32;
+
+ /* Extract a, b */
+ for (j = 0; j < n_ab; j++) {
+ secp256k1_scalar_set_b32(&ab[j], serproof, &overflow);
+ if (overflow) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ /* TODO our verifier currently bombs out with zeros because it uses
+ * scalar inverses gratuitously. Fix that. */
+ if (secp256k1_scalar_is_zero(&ab[j])) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ serproof += 32;
+ }
+ secp256k1_scalar_dot_product(&negprod, &ab[0], &ab[n_ab / 2], n_ab / 2);
+
+ ecmult_data.proof[i].proof = &proof[i];
+ /* set per-proof randomizer */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_finalize(&sha256, commit);
+ secp256k1_scalar_set_b32(&ecmult_data.randomizer[i], commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data.randomizer[i])) {
+ /* cryptographically unreachable */
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ /* Compute x*(dot - a*b) for each proof; add it and p_offs to the p_offs accumulator */
+ secp256k1_scalar_set_b32(&x, proof_commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&x)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_scalar_negate(&negprod, &negprod);
+ secp256k1_scalar_add(&negprod, &negprod, &dot);
+ secp256k1_scalar_mul(&x, &x, &negprod);
+ secp256k1_scalar_add(&x, &x, &proof[i].p_offs);
+
+ secp256k1_scalar_mul(&x, &x, &ecmult_data.randomizer[i]);
+ secp256k1_scalar_add(&ecmult_data.p_offs, &ecmult_data.p_offs, &x);
+
+ /* Special-case: trivial proofs are valid iff the explicitly revealed scalars
+ * dot to the explicitly revealed dot product. */
+ if (2 * vec_len <= IP_AB_SCALARS) {
+ if (!secp256k1_scalar_is_zero(&negprod)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ /* remaining data does not (and cannot) be computed for proofs with no a's or b's. */
+ if (vec_len == 0) {
+ continue;
+ }
+ }
+
+ /* Compute the inverse product and the array of squares; the rest will be filled
+ * in by the callback during the multiexp. */
+ ecmult_data.proof[i].serialized_lr = serproof; /* bookmark L/R location in proof */
+ negprod = ab[n_ab - 1];
+ ab[n_ab - 1] = ecmult_data.randomizer[i]; /* build r * x1 * x2 * ... * xn in last slot of `ab` array */
+ for (j = 0; j < ecmult_data.lg_vec_len; j++) {
+ secp256k1_scalar xi;
+ const size_t lidx = 2 * j;
+ const size_t ridx = 2 * j + 1;
+ const size_t bitveclen = (2 * ecmult_data.lg_vec_len + 7) / 8;
+ const unsigned char lrparity = 2 * !!(serproof[lidx / 8] & (1 << (lidx % 8))) + !!(serproof[ridx / 8] & (1 << (ridx % 8)));
+ /* Map commit -> H(commit || LR parity || Lx || Rx), compute xi from it */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, proof_commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ secp256k1_sha256_write(&sha256, &serproof[32 * lidx + bitveclen], 32);
+ secp256k1_sha256_write(&sha256, &serproof[32 * ridx + bitveclen], 32);
+ secp256k1_sha256_finalize(&sha256, proof_commit);
+
+ secp256k1_scalar_set_b32(&xi, proof_commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&xi)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_scalar_mul(&ab[n_ab - 1], &ab[n_ab - 1], &xi);
+ secp256k1_scalar_sqr(&ecmult_data.proof[i].xsq[j], &xi);
+ }
+ /* Compute inverse of all a's and b's, except the last b whose inverse is not needed.
+ * Also compute the inverse of (-r * x1 * ... * xn) which will be needed */
+ secp256k1_scalar_inverse_all_var(ecmult_data.proof[i].abinv, ab, n_ab);
+ ab[n_ab - 1] = negprod;
+
+ /* Compute (-a0 * r * x1 * ... * xn)^-1 which will be used to mask out individual x_i^-2's */
+ secp256k1_scalar_negate(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[0]);
+ secp256k1_scalar_mul(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[n_ab - 1]);
+
+ /* Compute each scalar times the previous' inverse, which is used to switch between a's and b's */
+ for (j = n_ab - 1; j > 0; j--) {
+ size_t prev_idx;
+ if (j == n_ab / 2) {
+ prev_idx = j - 1; /* we go from a_n to b_0 */
+ } else {
+ prev_idx = j & (j - 1); /* but from a_i' to a_i, where i' is i with its lowest set bit unset */
+ }
+ secp256k1_scalar_mul(
+ &ecmult_data.proof[i].abinv[j - 1],
+ &ecmult_data.proof[i].abinv[prev_idx],
+ &ab[j]
+ );
+ }
+
+ /* Extract -a0 * r * (x1 * ... * xn)^-1 which is our first coefficient. Use negprod as a dummy */
+ secp256k1_scalar_mul(&negprod, &ecmult_data.randomizer[i], &ab[0]); /* r*a */
+ secp256k1_scalar_sqr(&negprod, &negprod); /* (r*a)^2 */
+ secp256k1_scalar_mul(&ecmult_data.proof[i].xcache[0], &ecmult_data.proof[i].xsqinv_mask, &negprod); /* -a * r * (x1 * x2 * ... * xn)^-1 */
+ }
+
+ /* Do the multiexp */
+ if (secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &r, NULL, secp256k1_bulletproof_innerproduct_vfy_ecmult_callback, (void *) &ecmult_data, total_n_points) != 1) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_scratch_deallocate_frame(scratch);
+ return secp256k1_gej_is_infinity(&r);
+}
+
+typedef struct {
+ secp256k1_scalar x[SECP256K1_BULLETPROOF_MAX_DEPTH];
+ secp256k1_scalar xinv[SECP256K1_BULLETPROOF_MAX_DEPTH];
+ secp256k1_scalar yinv;
+ secp256k1_scalar yinvn;
+ const secp256k1_ge *geng;
+ const secp256k1_ge *genh;
+ const secp256k1_ge *g;
+ const secp256k1_scalar *a;
+ const secp256k1_scalar *b;
+ secp256k1_scalar g_sc;
+ size_t grouping;
+ size_t n;
+} secp256k1_bulletproof_innerproduct_pf_ecmult_context;
+
+/* At each level i of recursion (i from 0 upto lg(vector size) - 1)
+ * L = a_even . G_odd + b_odd . H_even (18)
+ * which, by expanding the generators into the original G's and H's
+ * and setting n = (1 << i), can be computed as follows:
+ *
+ * For j from 1 to [vector size],
+ * 1. Use H[j] or G[j] as generator, starting with H and switching
+ * every n.
+ * 2. Start with b1 with H and a0 with G, and increment by 2 each switch.
+ * 3. For k = 1, 2, 4, ..., n/2, use the same algorithm to choose
+ * between a and b to choose between x and x^-1, except using
+ * k in place of n. With H's choose x then x^-1, with G's choose
+ * x^-1 then x.
+ *
+ * For R everything is the same except swap G/H and a/b and x/x^-1.
+ */
+static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data;
+ const size_t ab_idx = (idx / ctx->grouping) ^ 1;
+ size_t i;
+
+ /* Special-case the primary generator */
+ if (idx == ctx->n) {
+ *pt = *ctx->g;
+ *sc = ctx->g_sc;
+ return 1;
+ }
+
+ /* steps 1/2 */
+ if ((idx / ctx->grouping) % 2 == 0) {
+ *pt = ctx->genh[idx];
+ *sc = ctx->b[ab_idx];
+ /* Map h -> h' (eqn 59) */
+ secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
+ } else {
+ *pt = ctx->geng[idx];
+ *sc = ctx->a[ab_idx];
+ }
+
+ /* step 3 */
+ for (i = 0; (1u << i) < ctx->grouping; i++) {
+ size_t grouping = (1u << i);
+ if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 0) {
+ secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
+ } else {
+ secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
+ }
+ }
+
+ secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
+ return 1;
+}
+
+/* Identical code except `== 0` changed to `== 1` twice, and the
+ * `+ 1` from Step 1/2 was moved to the other if branch. */
+static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data;
+ const size_t ab_idx = (idx / ctx->grouping) ^ 1;
+ size_t i;
+
+ /* Special-case the primary generator */
+ if (idx == ctx->n) {
+ *pt = *ctx->g;
+ *sc = ctx->g_sc;
+ return 1;
+ }
+
+ /* steps 1/2 */
+ if ((idx / ctx->grouping) % 2 == 1) {
+ *pt = ctx->genh[idx];
+ *sc = ctx->b[ab_idx];
+ /* Map h -> h' (eqn 59) */
+ secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
+ } else {
+ *pt = ctx->geng[idx];
+ *sc = ctx->a[ab_idx];
+ }
+
+ /* step 3 */
+ for (i = 0; (1u << i) < ctx->grouping; i++) {
+ size_t grouping = (1u << i);
+ if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 1) {
+ secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
+ } else {
+ secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
+ }
+ }
+
+ secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
+ return 1;
+}
+
+static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data;
+ size_t i;
+
+ *pt = ctx->geng[idx];
+ secp256k1_scalar_set_int(sc, 1);
+ for (i = 0; (1u << i) <= ctx->grouping; i++) {
+ if (idx & (1u << i)) {
+ secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
+ } else {
+ secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
+ }
+ }
+ return 1;
+}
+
+static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data;
+ size_t bit;
+
+ /* Collapsed-H callback: same shape as the G callback with the roles of x
+ * and x^-1 swapped, plus the extra running y^-n factor (h -> h' map). */
+ *pt = ctx->genh[idx];
+ secp256k1_scalar_set_int(sc, 1);
+ for (bit = 0; (1u << bit) <= ctx->grouping; bit++) {
+ const secp256k1_scalar *factor = (idx & (1u << bit)) ? &ctx->xinv[bit] : &ctx->x[bit];
+ secp256k1_scalar_mul(sc, sc, factor);
+ }
+ secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
+ /* advance the running y^-n power for the next call */
+ secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
+ return 1;
+}
+
+/* These proofs are not zero-knowledge. There is no need to worry about constant timeness.
+ * `commit_inp` must contain 256 bits of randomness, it is used immediately as a randomizer.
+ */
+static int secp256k1_bulletproof_inner_product_real_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, secp256k1_ge *out_pt, size_t *pt_idx, const secp256k1_ge *g, secp256k1_ge *geng, secp256k1_ge *genh, secp256k1_scalar *a_arr, secp256k1_scalar *b_arr, const secp256k1_scalar *yinv, const secp256k1_scalar *ux, const size_t n, unsigned char *commit) {
+ size_t i;
+ size_t halfwidth;
+
+ secp256k1_bulletproof_innerproduct_pf_ecmult_context pfdata;
+ pfdata.yinv = *yinv;
+ pfdata.g = g;
+ pfdata.geng = geng;
+ pfdata.genh = genh;
+ pfdata.a = a_arr;
+ pfdata.b = b_arr;
+ pfdata.n = n;
+
+ /* Protocol 1: Iterate, halving vector size until it is 1 */
+ for (halfwidth = n / 2, i = 0; halfwidth > IP_AB_SCALARS / 4; halfwidth /= 2, i++) { /* i counts halving rounds; pfdata.grouping = 2^i */
+ secp256k1_gej tmplj, tmprj;
+ size_t j;
+ int overflow;
+
+ pfdata.grouping = 1u << i;
+
+ /* L */
+ secp256k1_scalar_clear(&pfdata.g_sc);
+ for (j = 0; j < halfwidth; j++) {
+ secp256k1_scalar prod; /* cross term a_even . b_odd feeding the L point */
+ secp256k1_scalar_mul(&prod, &a_arr[2*j], &b_arr[2*j + 1]);
+ secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
+ }
+ secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
+
+ secp256k1_scalar_set_int(&pfdata.yinvn, 1); /* reset running y^-n; the callback advances it per point */
+ secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmplj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l, (void *) &pfdata, n + 1);
+ secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmplj);
+
+ /* R */
+ secp256k1_scalar_clear(&pfdata.g_sc);
+ for (j = 0; j < halfwidth; j++) {
+ secp256k1_scalar prod; /* cross term a_odd . b_even feeding the R point */
+ secp256k1_scalar_mul(&prod, &a_arr[2*j + 1], &b_arr[2*j]);
+ secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
+ }
+ secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
+
+ secp256k1_scalar_set_int(&pfdata.yinvn, 1);
+ secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmprj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r, (void *) &pfdata, n + 1);
+ secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmprj);
+
+ /* x, x^2, x^-1, x^-2 */
+ secp256k1_bulletproof_update_commit(commit, &out_pt[*pt_idx - 2], &out_pt[*pt_idx] - 1); /* note: &out_pt[*pt_idx] - 1 == &out_pt[*pt_idx - 1] */
+ secp256k1_scalar_set_b32(&pfdata.x[i], commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&pfdata.x[i])) {
+ return 0;
+ }
+ secp256k1_scalar_inverse_var(&pfdata.xinv[i], &pfdata.x[i]);
+
+ /* update scalar array */
+ for (j = 0; j < halfwidth; j++) { /* fold pairs in place: entry j overwrites the front half of the array */
+ secp256k1_scalar tmps;
+ secp256k1_scalar_mul(&a_arr[2*j], &a_arr[2*j], &pfdata.x[i]);
+ secp256k1_scalar_mul(&tmps, &a_arr[2*j + 1], &pfdata.xinv[i]);
+ secp256k1_scalar_add(&a_arr[j], &a_arr[2*j], &tmps);
+
+ secp256k1_scalar_mul(&b_arr[2*j], &b_arr[2*j], &pfdata.xinv[i]);
+ secp256k1_scalar_mul(&tmps, &b_arr[2*j + 1], &pfdata.x[i]);
+ secp256k1_scalar_add(&b_arr[j], &b_arr[2*j], &tmps);
+
+ }
+
+ /* Combine G generators and recurse, if that would be more optimal */
+ if ((n > 2048 && i == 3) || (n > 128 && i == 2) || (n > 32 && i == 1)) { /* heuristic thresholds for when collapsing generators pays off */
+ secp256k1_scalar yinv2;
+
+ for (j = 0; j < halfwidth; j++) {
+ secp256k1_gej rj;
+ secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g, (void *) &pfdata, 2u << i);
+ pfdata.geng += 2u << i;
+ secp256k1_ge_set_gej(&geng[j], &rj);
+ secp256k1_scalar_set_int(&pfdata.yinvn, 1);
+ secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h, (void *) &pfdata, 2u << i);
+ pfdata.genh += 2u << i;
+ secp256k1_ge_set_gej(&genh[j], &rj);
+ }
+
+ secp256k1_scalar_sqr(&yinv2, yinv); /* one squaring here plus i more below: yinv2 = yinv^(2^(i+1)) */
+ for (j = 0; j < i; j++) {
+ secp256k1_scalar_sqr(&yinv2, &yinv2);
+ }
+ if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, pt_idx, g, geng, genh, a_arr, b_arr, &yinv2, ux, halfwidth, commit)) {
+ return 0;
+ }
+ break;
+ }
+ }
+ return 1;
+}
+
+static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *proof_len, const secp256k1_bulletproof_generators *gens, const secp256k1_scalar *yinv, const size_t n, secp256k1_ecmult_multi_callback *cb, void *cb_data, const unsigned char *commit_inp) { /* Top-level inner-product prover: serializes <a,b>, the final a/b scalars and the L/R points into `proof` */
+ secp256k1_sha256 sha256;
+ size_t i;
+ unsigned char commit[32];
+ secp256k1_scalar *a_arr;
+ secp256k1_scalar *b_arr;
+ secp256k1_ge *out_pt;
+ secp256k1_ge *geng;
+ secp256k1_ge *genh;
+ secp256k1_scalar ux;
+ int overflow;
+ size_t pt_idx = 0;
+ secp256k1_scalar dot;
+ size_t half_n_ab = n < IP_AB_SCALARS / 2 ? n : IP_AB_SCALARS / 2; /* number of a (and of b) scalars serialized at the end */
+
+ if (*proof_len < secp256k1_bulletproof_innerproduct_proof_length(n)) {
+ return 0;
+ }
+ *proof_len = secp256k1_bulletproof_innerproduct_proof_length(n);
+
+ /* Special-case lengths 0 and 1 whose proofs are just explicit lists of scalars */
+ if (n <= IP_AB_SCALARS / 2) {
+ secp256k1_scalar a[IP_AB_SCALARS / 2];
+ secp256k1_scalar b[IP_AB_SCALARS / 2];
+
+ for (i = 0; i < n; i++) {
+ cb(&a[i], NULL, 2*i, cb_data);
+ cb(&b[i], NULL, 2*i+1, cb_data);
+ }
+
+ secp256k1_scalar_dot_product(&dot, a, b, n);
+ secp256k1_scalar_get_b32(proof, &dot);
+
+ for (i = 0; i < n; i++) {
+ secp256k1_scalar_get_b32(&proof[32 * (i + 1)], &a[i]);
+ secp256k1_scalar_get_b32(&proof[32 * (i + n + 1)], &b[i]);
+ }
+ VERIFY_CHECK(*proof_len == 32 * (2 * n + 1));
+ return 1;
+ }
+
+ /* setup for nontrivial proofs */
+ if (!secp256k1_scratch_allocate_frame(scratch, 2 * n * (sizeof(secp256k1_scalar) + sizeof(secp256k1_ge)) + 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge), 5)) {
+ return 0;
+ }
+
+ a_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
+ b_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
+ geng = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
+ genh = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
+ out_pt = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge));
+ VERIFY_CHECK(a_arr != NULL);
+ VERIFY_CHECK(b_arr != NULL);
+ VERIFY_CHECK(gens != NULL); /* NOTE(review): checks the input `gens`, not the fresh geng/genh/out_pt allocations -- confirm intent */
+
+ for (i = 0; i < n; i++) {
+ cb(&a_arr[i], NULL, 2*i, cb_data);
+ cb(&b_arr[i], NULL, 2*i+1, cb_data);
+ geng[i] = gens->gens[i];
+ genh[i] = gens->gens[i + gens->n/2]; /* H generators live in the second half of gens->gens */
+ }
+
+ /* Record final dot product */
+ secp256k1_scalar_dot_product(&dot, a_arr, b_arr, n);
+ secp256k1_scalar_get_b32(proof, &dot);
+
+ /* Protocol 2: hash dot product to obtain G-randomizer */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit_inp, 32);
+ secp256k1_sha256_write(&sha256, proof, 32);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ proof += 32;
+
+ secp256k1_scalar_set_b32(&ux, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ux)) {
+ /* cryptographically unreachable */
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, &pt_idx, gens->blinding_gen, geng, genh, a_arr, b_arr, yinv, &ux, n, commit)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ /* Final a/b values */
+ for (i = 0; i < half_n_ab; i++) {
+ secp256k1_scalar_get_b32(&proof[32 * i], &a_arr[i]);
+ secp256k1_scalar_get_b32(&proof[32 * (i + half_n_ab)], &b_arr[i]);
+ }
+ proof += 64 * half_n_ab;
+ secp256k1_bulletproof_serialize_points(proof, out_pt, pt_idx);
+
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 1;
+}
+
+#undef IP_AB_SCALARS
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/main_impl.h
new file mode 100644
index 0000000..f175ba0
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/main_impl.h
@@ -0,0 +1,240 @@
+/**********************************************************************
+ * Copyright (c) 2018 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_BULLETPROOF_MAIN_IMPL
+#define SECP256K1_MODULE_BULLETPROOF_MAIN_IMPL
+
+#include "group.h"
+#include "scalar.h"
+
+#include "modules/commitment/main_impl.h"
+
+struct secp256k1_bulletproof_generators {
+ size_t n; /* total number of shared generators in `gens` (the array itself holds n + 1 entries; the extra slot is `blinding_gen`) */
+ /* The `n` generators created with this struct; the rangeproof code uses the first n/2 entries as `G_i` and the second n/2 as `H_i` */
+ secp256k1_ge *gens;
+ /* `H` "alternate" generator, used in Pedersen commitments. Passed in by caller to
+ * `secp256k1_bulletproof_generators_create`; stored in this structure to allow consistent
+ * generators between functions using `secp256k1_bulletproof_generators` and functions
+ * using the Pedersen commitment module. */
+ secp256k1_ge *blinding_gen;
+};
+
+#include "modules/bulletproofs/inner_product_impl.h"
+#include "modules/bulletproofs/rangeproof_impl.h"
+#include "modules/bulletproofs/util.h"
+
+
+/* Setup: allocate a secp256k1_bulletproof_generators holding n deterministically
+ * derived generators (seeded from the standard base point G) followed by the
+ * caller-supplied blinding generator in the final array slot. */
+secp256k1_bulletproof_generators *secp256k1_bulletproof_generators_create(const secp256k1_context *ctx, const secp256k1_generator *blinding_gen, size_t n) {
+ secp256k1_bulletproof_generators *ret;
+ secp256k1_rfc6979_hmac_sha256 rng;
+ unsigned char seed[64];
+ size_t i;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(blinding_gen != NULL);
+
+ ret = (secp256k1_bulletproof_generators *)checked_malloc(&ctx->error_callback, sizeof(*ret));
+ if (ret == NULL) {
+ return NULL;
+ }
+ /* n + 1 slots: the extra one holds the blinding generator (aliased below). */
+ ret->gens = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (n + 1) * sizeof(*ret->gens));
+ if (ret->gens == NULL) {
+ free(ret);
+ return NULL;
+ }
+ ret->blinding_gen = &ret->gens[n];
+ ret->n = n;
+
+ /* Seed the generator-derivation RNG with the coordinates of the base point G. */
+ secp256k1_fe_get_b32(&seed[0], &secp256k1_ge_const_g.x);
+ secp256k1_fe_get_b32(&seed[32], &secp256k1_ge_const_g.y);
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed, 64);
+ for (i = 0; i < n; i++) {
+ unsigned char tmp[32] = { 0 };
+ secp256k1_generator gen;
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32);
+ CHECK(secp256k1_generator_generate(ctx, &gen, tmp));
+ secp256k1_generator_load(&ret->gens[i], &gen);
+ }
+
+ secp256k1_generator_load(&ret->blinding_gen[0], blinding_gen);
+
+ return ret;
+}
+
+void secp256k1_bulletproof_generators_destroy(const secp256k1_context* ctx, secp256k1_bulletproof_generators *gens) {
+ (void) ctx; /* context is unused; kept for API symmetry */
+ if (gens == NULL) return;
+ /* blinding_gen aliases a slot inside gens->gens, so freeing the array covers it */
+ free(gens->gens);
+ free(gens);
+}
+
+int secp256k1_bulletproof_rangeproof_verify(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const secp256k1_bulletproof_generators *gens, const unsigned char *proof, size_t plen,
+ const uint64_t *min_value, const secp256k1_pedersen_commitment* commit, size_t n_commits, size_t nbits, const secp256k1_generator *value_gen, const unsigned char *extra_commit, size_t extra_commit_len) { /* Verify a single aggregated rangeproof covering n_commits commitments */
+ int ret;
+ size_t i;
+ secp256k1_ge *commitp;
+ secp256k1_ge value_genp;
+ const secp256k1_ge *commitp_ptr;
+ const uint64_t *minvalue_ptr;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(scratch != NULL);
+ ARG_CHECK(gens != NULL);
+ ARG_CHECK(gens->n >= 2 * nbits * n_commits);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(n_commits > 0);
+ ARG_CHECK(nbits > 0);
+ ARG_CHECK(nbits <= 64);
+ ARG_CHECK(value_gen != NULL);
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+
+ if (!secp256k1_scratch_allocate_frame(scratch, 2 * n_commits * sizeof(secp256k1_ge), 1)) { /* NOTE(review): reserves 2x the bytes allocated below -- confirm whether the slack is intentional */
+ return 0;
+ }
+
+ commitp = (secp256k1_ge *)secp256k1_scratch_alloc(scratch, n_commits * sizeof(secp256k1_ge));
+ for (i = 0; i < n_commits; i++) {
+ secp256k1_pedersen_commitment_load(&commitp[i], &commit[i]);
+ }
+ secp256k1_generator_load(&value_genp, value_gen);
+
+ commitp_ptr = commitp;
+ minvalue_ptr = min_value;
+ ret = secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, &proof, 1, plen, nbits, &minvalue_ptr, &commitp_ptr, n_commits, &value_genp, gens, &extra_commit, &extra_commit_len); /* batch of size 1 */
+ secp256k1_scratch_deallocate_frame(scratch);
+ return ret;
+}
+
+int secp256k1_bulletproof_rangeproof_verify_multi(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const secp256k1_bulletproof_generators *gens, const unsigned char* const* proof, size_t n_proofs, size_t plen, const uint64_t* const* min_value, const secp256k1_pedersen_commitment* const* commit, size_t n_commits, size_t nbits, const secp256k1_generator *value_gen, const unsigned char* const* extra_commit, size_t *extra_commit_len) { /* Batch-verify n_proofs rangeproofs; all proofs must share plen, nbits and n_commits */
+ int ret;
+ secp256k1_ge **commitp;
+ secp256k1_ge *value_genp;
+ size_t i;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(scratch != NULL);
+ ARG_CHECK(gens != NULL);
+ ARG_CHECK(gens->n >= 2 * nbits * n_commits);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(n_proofs > 0);
+ ARG_CHECK(n_commits > 0);
+ ARG_CHECK(nbits > 0);
+ ARG_CHECK(nbits <= 64);
+ ARG_CHECK(value_gen != NULL);
+ ARG_CHECK((extra_commit_len == NULL) == (extra_commit == NULL));
+ if (extra_commit != NULL) {
+ for (i = 0; i < n_proofs; i++) {
+ ARG_CHECK(extra_commit[i] != NULL || extra_commit_len[i] == 0);
+ }
+ }
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+
+ if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*value_genp) + sizeof(*commitp) + n_commits * sizeof(**commitp)), 1 + n_proofs)) {
+ return 0;
+ }
+
+ commitp = (secp256k1_ge **)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*commitp));
+ value_genp = (secp256k1_ge *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*value_genp));
+ for (i = 0; i < n_proofs; i++) {
+ size_t j;
+ commitp[i] = (secp256k1_ge *)secp256k1_scratch_alloc(scratch, n_commits * sizeof(*commitp[i]));
+ for (j = 0; j < n_commits; j++) {
+ secp256k1_pedersen_commitment_load(&commitp[i][j], &commit[i][j]);
+ }
+ secp256k1_generator_load(&value_genp[i], &value_gen[i]); /* value_gen is indexed per proof here */
+ }
+
+ ret = secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, proof, n_proofs, plen, nbits, min_value, (const secp256k1_ge **) commitp, n_commits, value_genp, gens, extra_commit, extra_commit_len);
+ secp256k1_scratch_deallocate_frame(scratch);
+ return ret;
+}
+
+int secp256k1_bulletproof_rangeproof_rewind(const secp256k1_context* ctx, const secp256k1_bulletproof_generators *gens, uint64_t *value, unsigned char *blind, const unsigned char *proof, size_t plen, uint64_t min_value, const secp256k1_pedersen_commitment* commit, const secp256k1_generator *value_gen, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
+ secp256k1_scalar recovered_blind;
+ int ret;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(value != NULL);
+ ARG_CHECK(blind != NULL);
+ ARG_CHECK(gens != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(value_gen != NULL);
+ ARG_CHECK(nonce != NULL);
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+
+ ret = secp256k1_bulletproof_rangeproof_rewind_impl(value, &recovered_blind, proof, plen, min_value, commit, value_gen, gens->blinding_gen, nonce, extra_commit, extra_commit_len);
+ if (ret != 1) return ret; /* rewind failed: leave *blind untouched */
+ /* Success: serialize the recovered blinding factor for the caller. */
+ secp256k1_scalar_get_b32(blind, &recovered_blind);
+ return 1;
+}
+
+/* Prove each committed value lies in its nbits-bit range (offset by min_value). Original fork note: "Put everything inside a struct, so that we can receive as input this struct and the commitment to the value". */
+int secp256k1_bulletproof_rangeproof_prove(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const secp256k1_bulletproof_generators *gens, unsigned char *proof, size_t *plen, const uint64_t *value, const uint64_t *min_value, const unsigned char* const* blind, size_t n_commits, const secp256k1_generator *value_gen, size_t nbits, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
+ int ret;
+ secp256k1_ge *commitp;
+ secp256k1_scalar *blinds;
+ secp256k1_ge value_genp;
+ size_t i;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(scratch != NULL);
+ ARG_CHECK(gens != NULL);
+ ARG_CHECK(gens->n >= 2 * nbits * n_commits);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(plen != NULL);
+ ARG_CHECK(value != NULL);
+ ARG_CHECK(blind != NULL);
+ ARG_CHECK(value_gen != NULL);
+ ARG_CHECK(nonce != NULL);
+ ARG_CHECK(n_commits > 0);
+ ARG_CHECK(nbits <= 64);
+ for (i = 0; i < n_commits; i++) {
+ ARG_CHECK(blind[i] != NULL); /* checked unconditionally: blind[i] is dereferenced below even when nbits == 64 */
+ if (nbits < 64) {
+ ARG_CHECK(value[i] < (1ull << nbits));
+ }
+ }
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+
+ if (!secp256k1_scratch_allocate_frame(scratch, n_commits * (sizeof(*commitp) + sizeof(*blinds)), 2)) {
+ return 0;
+ }
+ commitp = (secp256k1_ge *)secp256k1_scratch_alloc(scratch, n_commits * sizeof(*commitp));
+ blinds = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, n_commits * sizeof(*blinds));
+
+ secp256k1_generator_load(&value_genp, value_gen);
+ for (i = 0; i < n_commits; i++) {
+ int overflow;
+ secp256k1_gej commitj;
+ secp256k1_scalar_set_b32(&blinds[i], blind[i], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&blinds[i])) {
+ /* Invalid blinding factor: release the scratch frame before failing. */
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_pedersen_ecmult(&commitj, &blinds[i], value[i], &value_genp, &gens->blinding_gen[0]);
+ secp256k1_ge_set_gej(&commitp[i], &commitj);
+ }
+ ret = secp256k1_bulletproof_rangeproof_prove_impl(&ctx->ecmult_ctx, scratch, proof, plen, nbits, value, min_value, blinds, commitp, n_commits, &value_genp, gens, nonce, extra_commit, extra_commit_len);
+ secp256k1_scratch_deallocate_frame(scratch);
+ return ret;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/rangeproof_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/rangeproof_impl.h
new file mode 100644
index 0000000..6287edb
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/rangeproof_impl.h
@@ -0,0 +1,792 @@
+/**********************************************************************
+ * Copyright (c) 2018 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_BULLETPROOF_RANGEPROOF_IMPL
+#define SECP256K1_MODULE_BULLETPROOF_RANGEPROOF_IMPL
+
+#include "modules/bulletproofs/inner_product_impl.h"
+#include "modules/bulletproofs/util.h"
+#include "group.h"
+
+#define MAX_NBITS 64
+
+typedef struct {
+ secp256k1_scalar yinv; /* y^-1 */
+ secp256k1_scalar yinvn; /* y^-nbits; per-commitment step factor for the H exponents */
+ secp256k1_scalar z;
+ secp256k1_scalar z_randomized; /* z * per-proof randomizer; computed on the first callback call */
+ secp256k1_scalar zsq; /* scratch: z^2 and the running products derived from it */
+ secp256k1_scalar g_exponent; /* -z * per-proof randomizer, shared by every G_i */
+ secp256k1_scalar negz; /* -z */
+ secp256k1_scalar x;
+ secp256k1_ge a;
+ secp256k1_ge s;
+ size_t n; /* total bit count, nbits * n_commits */
+ /* eq (61) stuff */
+ size_t count; /* number of "extra" (non-generator) points emitted so far */
+ secp256k1_scalar randomizer61;
+ secp256k1_scalar y;
+ secp256k1_scalar t; /* t = <l(x), r(x)>, read off the front of the inner product proof */
+ const secp256k1_ge *asset; /* value generator for this proof */
+ const secp256k1_ge *commit; /* the n_commits Pedersen commitments being verified */
+ const uint64_t *min_value; /* optional per-commitment minimums, or NULL */
+ size_t n_commits;
+ secp256k1_ge t1;
+ secp256k1_ge t2;
+} secp256k1_bulletproof_vfy_ecmult_context;
+
+static int secp256k1_bulletproof_rangeproof_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data) { /* Supplies term idx of the verifier's combined multiexp; for idx < 2n the points are the fixed G_i/H_i generators */
+ secp256k1_bulletproof_vfy_ecmult_context *ctx = (secp256k1_bulletproof_vfy_ecmult_context *) data;
+
+ if (idx == 0) { /* first call: compute the per-proof randomized exponents used below */
+ secp256k1_scalar_mul(&ctx->g_exponent, &ctx->negz, randomizer);
+ secp256k1_scalar_mul(&ctx->z_randomized, &ctx->z, randomizer);
+ }
+
+ if (idx < ctx->n) { /* G_i: every one shares the exponent -z * randomizer */
+ *sc = ctx->g_exponent;
+ } else if (idx < 2 * ctx->n) { /* H_i: exponent (z + z^(2+j) * 2^b * y^-i) * randomizer, with j = commit index, b = bit index, i = global bit index */
+ const size_t nbits = ctx->n / ctx->n_commits;
+ const size_t commit_idx = (idx - ctx->n) / nbits;
+ const size_t bit_idx = (idx - ctx->n) % nbits;
+
+ if (bit_idx == 0) { /* re-seed the running product at the start of each commitment's block */
+ size_t i;
+ secp256k1_scalar tmp;
+ secp256k1_scalar_mul(&tmp, &ctx->z, &ctx->yinvn);
+ secp256k1_scalar_sqr(&ctx->zsq, &ctx->z);
+ for (i = 0; i < commit_idx; i++) {
+ secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &tmp);
+ }
+ secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, randomizer);
+ }
+ secp256k1_scalar_add(sc, &ctx->zsq, &ctx->z_randomized);
+
+ secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &ctx->yinv); /* advance the running product by a factor 2/y for the next bit */
+ secp256k1_scalar_add(&ctx->zsq, &ctx->zsq, &ctx->zsq);
+ } else { /* the per-proof extras: A, S, the asset generator, T1, T2 and one V per commitment */
+ switch(ctx->count) { /* note: cases appear as 2,1,0,3,4 in the source but count runs 0,1,2,... across calls */
+ /* S^x in eq (62) */
+ case 2:
+ *sc = ctx->x;
+ *pt = ctx->s;
+ break;
+ /* A in eq (62) */
+ case 1:
+ *pt = ctx->a;
+ secp256k1_scalar_set_int(sc, 1);
+ break;
+ /* G^[k(y, z) + sum_i y^i - t] from eq (61) */
+ case 0: {
+ size_t i;
+ secp256k1_scalar yn;
+ secp256k1_scalar twosum;
+ secp256k1_scalar tmp;
+
+ secp256k1_scalar_clear(&twosum);
+ secp256k1_scalar_clear(&yn);
+ secp256k1_scalar_set_int(&tmp, 1);
+
+ secp256k1_scalar_sqr(&ctx->zsq, &ctx->z); /* need to re-set this */
+ secp256k1_scalar_negate(sc, &ctx->zsq); /* -z^2 */
+ secp256k1_scalar_add(sc, sc, &ctx->z); /* z - z^2 */
+
+ for (i = 0; i < ctx->n_commits; i++) {
+ const size_t nbits = ctx->n / ctx->n_commits;
+ secp256k1_scalar negzn;
+ secp256k1_scalar twon;
+ size_t j;
+
+ secp256k1_scalar_clear(&twon);
+ for (j = 0; j < nbits; j++) {
+ secp256k1_scalar_mul(&yn, &yn, &ctx->y);
+ secp256k1_scalar_add(&twon, &twon, &twon);
+
+ secp256k1_scalar_add(&yn, &yn, &tmp);
+ secp256k1_scalar_add(&twon, &twon, &tmp); /* Horner steps: yn = yn*y + 1, twon = 2*twon + 1 */
+ }
+
+ secp256k1_scalar_mul(&negzn, &ctx->zsq, &ctx->negz);
+ for (j = 0; j < i; j++) {
+ secp256k1_scalar_mul(&negzn, &negzn, &ctx->z);
+ }
+ if (ctx->min_value != NULL) {
+ secp256k1_scalar mv;
+ secp256k1_scalar_set_int(&mv, ctx->min_value[i]); /* NOTE(review): set_int takes an unsigned int; a 64-bit min_value may be truncated here -- confirm */
+ secp256k1_scalar_mul(&mv, &mv, &ctx->negz);
+ secp256k1_scalar_mul(&mv, &mv, &ctx->z); /* mv = -z^2 * min_value[i] */
+ for (j = 0; j < i; j++) {
+ secp256k1_scalar_mul(&negzn, &negzn, &ctx->z); /* NOTE(review): scales negzn a second time and leaves mv without a z^i factor -- looks like it should multiply mv; confirm against eq (61) */
+ }
+ secp256k1_scalar_add(&twosum, &twosum, &mv);
+ }
+ secp256k1_scalar_mul(&twon, &twon, &negzn);
+ secp256k1_scalar_add(&twosum, &twosum, &twon);
+ } /* yn = 1 + y + ... + y^(n-1); twosum = (z^3 + ... + z^{2 + n_commits})(1 + 2 + ... + 2^(n-1)) */
+
+
+ secp256k1_scalar_mul(sc, sc, &yn); /* (z - z^2)(1 + ... + y^(n-1)) */
+ secp256k1_scalar_add(sc, sc, &twosum); /* (z - z^2)(1 + ... + y^(n-1)) - z^3(1 + ... + 2^(n-1)) */
+ secp256k1_scalar_negate(&tmp, &ctx->t);
+ secp256k1_scalar_add(sc, sc, &tmp); /* (z - z^2)(1 + ... + y^n) - z^3(1 + ... + 2^n) - t */
+ secp256k1_scalar_mul(sc, sc, &ctx->randomizer61);
+ *pt = *ctx->asset;
+ break;
+ }
+ /* T1^x in eq (61) */
+ case 3:
+ secp256k1_scalar_mul(sc, &ctx->x, &ctx->randomizer61);
+ *pt = ctx->t1;
+ break;
+ /* T2^x^2 in eq (61) */
+ case 4:
+ secp256k1_scalar_sqr(sc, &ctx->x);
+ secp256k1_scalar_mul(sc, sc, &ctx->randomizer61);
+ *pt = ctx->t2;
+ break;
+ /* V^z^2 in eq (61) */
+ default:
+ VERIFY_CHECK(ctx->count < 5 + ctx->n_commits);
+
+ secp256k1_scalar_mul(sc, &ctx->zsq, &ctx->randomizer61);
+ secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &ctx->z); /* advance zsq to the z power for the next commitment */
+ *pt = ctx->commit[ctx->count - 5];
+ break;
+ }
+ secp256k1_scalar_mul(sc, sc, randomizer); /* every extra term is additionally scaled by the per-proof randomizer */
+ ctx->count++;
+ }
+ return 1;
+}
+
+static int secp256k1_bulletproof_rangeproof_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const unsigned char* const* proof, const size_t n_proofs, const size_t plen, size_t nbits, const uint64_t* const* min_value, const secp256k1_ge* const* commitp, size_t n_commits, const secp256k1_ge *value_gen, const secp256k1_bulletproof_generators *gens, const unsigned char* const* extra_commit, size_t *extra_commit_len) { /* Batch-verifies n_proofs rangeproofs by folding them into one inner-product verification */
+ secp256k1_bulletproof_vfy_ecmult_context *ecmult_data;
+ secp256k1_bulletproof_innerproduct_context *innp_ctx;
+ int ret;
+ size_t i;
+ int same_generators = 1;
+
+ /* sanity-check input */
+ if (POPCOUNT(nbits) != 1 || nbits > MAX_NBITS) { /* nbits must be a power of two */
+ return 0;
+ }
+ if (plen < 64 + 128 + 1) { /* inner product argument will do a more precise check */
+ return 0;
+ }
+ if (plen > SECP256K1_BULLETPROOF_MAX_PROOF) {
+ return 0;
+ }
+
+ if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data) + sizeof(*innp_ctx)), 2)) {
+ return 0;
+ }
+ ecmult_data = (secp256k1_bulletproof_vfy_ecmult_context *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data));
+ innp_ctx = (secp256k1_bulletproof_innerproduct_context *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*innp_ctx));
+
+ /* In general you cannot memcmp secp256k1_ge's like this because their field
+ * elements may represent the same number differently. In this case it is ok
+ * because (a) a false positive here is no big deal, it will add one mult per
+ * proof to the giant ecmult_multi at the end but not change any semantics;
+ * and (b) typically this list of generators was deterministically decoded
+ * from a list of secp256k1_generators which have a compact encoding, so that
+ * equal group elements actually will compare equal. */
+ for (i = 1; i < n_proofs; i++) {
+ if (memcmp(&value_gen[i], &value_gen[i - 1], sizeof(value_gen[i])) != 0) {
+ same_generators = 0;
+ }
+ }
+
+ for (i = 0; i < n_proofs; i++) {
+ secp256k1_sha256 sha256;
+ unsigned char commit[32] = {0};
+ unsigned char randomizer61[32] = {0}; /* randomizer for eq (61) so we can add it to eq (62) to save a separate multiexp */
+ secp256k1_scalar taux, mu;
+ secp256k1_ge age, sge;
+ int overflow;
+ size_t j;
+
+ /* Commit to all input data: min value, pedersen commit, asset generator, extra_commit */
+ if (min_value != NULL && min_value[i] != NULL) {
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ for (j = 0; j < n_commits; j++) {
+ unsigned char vbuf[8]; /* min_value[i][j] serialized little-endian */
+ vbuf[0] = min_value[i][j];
+ vbuf[1] = min_value[i][j] >> 8;
+ vbuf[2] = min_value[i][j] >> 16;
+ vbuf[3] = min_value[i][j] >> 24;
+ vbuf[4] = min_value[i][j] >> 32;
+ vbuf[5] = min_value[i][j] >> 40;
+ vbuf[6] = min_value[i][j] >> 48;
+ vbuf[7] = min_value[i][j] >> 56;
+ secp256k1_sha256_write(&sha256, vbuf, 8);
+ }
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+ for (j = 0; j < n_commits; j++) {
+ secp256k1_bulletproof_update_commit(commit, &commitp[i][j], &value_gen[i]);
+ }
+ if (extra_commit != NULL && extra_commit[i] != NULL) {
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, extra_commit[i], extra_commit_len[i]);
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+
+ /* Compute y, z, x */
+ secp256k1_bulletproof_deserialize_point(&age, &proof[i][64], 0, 4);
+ secp256k1_bulletproof_deserialize_point(&sge, &proof[i][64], 1, 4);
+
+ secp256k1_bulletproof_update_commit(commit, &age, &sge); /* first absorption of A and S yields y */
+ secp256k1_scalar_set_b32(&ecmult_data[i].y, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].y)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_bulletproof_update_commit(commit, &age, &sge); /* second absorption of the same points yields z */
+ secp256k1_scalar_set_b32(&ecmult_data[i].z, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].z)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ secp256k1_bulletproof_deserialize_point(&ecmult_data[i].t1, &proof[i][64], 2, 4);
+ secp256k1_bulletproof_deserialize_point(&ecmult_data[i].t2, &proof[i][64], 3, 4);
+
+ secp256k1_bulletproof_update_commit(commit, &ecmult_data[i].t1, &ecmult_data[i].t2);
+ secp256k1_scalar_set_b32(&ecmult_data[i].x, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].x)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ /* compute exponent offsets */
+ secp256k1_scalar_inverse_var(&ecmult_data[i].yinv, &ecmult_data[i].y); /* TODO somehow batch this w the inner-product argument inverse */
+ ecmult_data[i].yinvn = ecmult_data[i].yinv;
+ for (j = 0; j < secp256k1_floor_lg(nbits); j++) { /* repeated squaring: yinvn = y^-nbits (nbits is a power of two) */
+ secp256k1_scalar_sqr(&ecmult_data[i].yinvn, &ecmult_data[i].yinvn);
+ }
+ secp256k1_scalar_sqr(&ecmult_data[i].zsq, &ecmult_data[i].z);
+ secp256k1_scalar_negate(&ecmult_data[i].negz, &ecmult_data[i].z);
+
+ /* Update commit with remaining data for the inner product proof */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &proof[i][0], 64);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_finalize(&sha256, randomizer61);
+ secp256k1_scalar_set_b32(&ecmult_data[i].randomizer61, randomizer61, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].randomizer61)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ /* Deserialize everything else */
+ secp256k1_scalar_set_b32(&taux, &proof[i][0], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&taux)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ secp256k1_scalar_set_b32(&mu, &proof[i][32], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&mu)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+ /* A little sketchy, we read t (l(x) . r(x)) off the front of the inner product proof,
+ * which we otherwise treat as a black box */
+ secp256k1_scalar_set_b32(&ecmult_data[i].t, &proof[i][64 + 128 + 1], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].t)) {
+ secp256k1_scratch_deallocate_frame(scratch);
+ return 0;
+ }
+
+ /* Verify inner product proof */
+ ecmult_data[i].a = age;
+ ecmult_data[i].s = sge;
+ ecmult_data[i].n = nbits * n_commits;
+ ecmult_data[i].count = 0;
+ ecmult_data[i].asset = &value_gen[i];
+ ecmult_data[i].min_value = min_value == NULL ? NULL : min_value[i];
+ ecmult_data[i].commit = commitp[i];
+ ecmult_data[i].n_commits = n_commits;
+ secp256k1_scalar_mul(&taux, &taux, &ecmult_data[i].randomizer61); /* fold the randomized eq (61) blinding term into mu's P offset */
+ secp256k1_scalar_add(&mu, &mu, &taux);
+
+ innp_ctx[i].proof = &proof[i][64 + 128 + 1];
+ innp_ctx[i].p_offs = mu;
+ memcpy(innp_ctx[i].commit, commit, 32);
+ innp_ctx[i].yinv = ecmult_data[i].yinv;
+ innp_ctx[i].rangeproof_cb = secp256k1_bulletproof_rangeproof_vfy_callback;
+ innp_ctx[i].rangeproof_cb_data = (void *) &ecmult_data[i];
+ innp_ctx[i].n_extra_rangeproof_points = 5 + n_commits; /* A, S, asset generator, T1, T2, plus one V per commitment */
+ }
+
+ ret = secp256k1_bulletproof_inner_product_verify_impl(ecmult_ctx, scratch, gens, nbits * n_commits, innp_ctx, n_proofs, plen - (64 + 128 + 1), same_generators);
+ secp256k1_scratch_deallocate_frame(scratch);
+ return ret;
+}
+
+typedef struct {
+ const unsigned char *nonce; /* seed for the deterministic s_l/s_r blinding scalars */
+ secp256k1_scalar y;
+ secp256k1_scalar z;
+ secp256k1_scalar yn; /* running power y^count */
+ secp256k1_scalar z22n; /* running z^(2+commit_idx) * 2^bit_idx term */
+ const uint64_t *val;
+ const uint64_t *min_val; /* optional per-commitment minimums; NULL means 0 */
+ size_t n_vals;
+ size_t nbits; /* bits per committed value */
+ size_t count; /* index of the next (l, r) pair to generate */
+} secp256k1_bulletproof_lr_generator;
+
+static void secp256k1_lr_generator_init(secp256k1_bulletproof_lr_generator *generator, const unsigned char *nonce, const secp256k1_scalar *y, const secp256k1_scalar *z, size_t nbits, const uint64_t *val, const uint64_t *min_val, size_t n_vals) {
+ /* Begin a fresh l(x)/r(x) stream at position 0 with running power y^0 = 1. */
+ generator->count = 0;
+ secp256k1_scalar_set_int(&generator->yn, 1);
+ generator->nonce = nonce;
+ generator->y = *y;
+ generator->z = *z;
+ generator->nbits = nbits;
+ generator->val = val; generator->min_val = min_val;
+ generator->n_vals = n_vals;
+}
+
+static void secp256k1_lr_generate(secp256k1_bulletproof_lr_generator *generator, secp256k1_scalar *lout, secp256k1_scalar *rout, const secp256k1_scalar *x) { /* Emits the next coefficient pair l(x), r(x) and advances the stream */
+ const size_t commit_idx = generator->count / generator->nbits;
+ const size_t bit_idx = generator->count % generator->nbits;
+ const uint64_t mv = generator->min_val == NULL ? 0 : generator->min_val[commit_idx];
+ const int bit = ((generator->val[commit_idx] - mv)>> bit_idx) & 1; /* bit_idx-th bit of the range-proven (offset) value */
+ secp256k1_scalar sl, sr;
+ secp256k1_scalar negz;
+
+ if (bit_idx == 0) { /* start of a new commitment's block: reset z22n to z^(2+commit_idx) */
+ size_t i;
+ secp256k1_scalar_sqr(&generator->z22n, &generator->z);
+ for (i = 0; i < commit_idx; i++) {
+ secp256k1_scalar_mul(&generator->z22n, &generator->z22n, &generator->z);
+ }
+ }
+
+ secp256k1_scalar_chacha20(&sl, &sr, generator->nonce, generator->count + 2); /* deterministic blinding pair; count + 2 skips the first two chacha20 indices -- presumably reserved by the caller, confirm */
+ secp256k1_scalar_mul(&sl, &sl, x);
+ secp256k1_scalar_mul(&sr, &sr, x);
+
+ secp256k1_scalar_set_int(lout, bit);
+ secp256k1_scalar_negate(&negz, &generator->z);
+ secp256k1_scalar_add(lout, lout, &negz);
+ secp256k1_scalar_add(lout, lout, &sl); /* l = bit - z + sl*x */
+
+ secp256k1_scalar_set_int(rout, 1 - bit);
+ secp256k1_scalar_negate(rout, rout);
+ secp256k1_scalar_add(rout, rout, &generator->z);
+ secp256k1_scalar_add(rout, rout, &sr);
+ secp256k1_scalar_mul(rout, rout, &generator->yn);
+ secp256k1_scalar_add(rout, rout, &generator->z22n); /* r = (bit - 1 + z + sr*x) * y^count + z22n */
+
+ generator->count++;
+ secp256k1_scalar_mul(&generator->yn, &generator->yn, &generator->y); /* advance running powers for the next bit */
+ secp256k1_scalar_add(&generator->z22n, &generator->z22n, &generator->z22n);
+}
+
+/* Callback state used to feed l and r scalars to the inner product prover. */
+typedef struct {
+ secp256k1_scalar x; /* challenge x at which l and r are evaluated */
+ secp256k1_scalar cache; /* r value produced alongside the last l, replayed on the next odd-index call */
+ secp256k1_bulletproof_lr_generator lr_gen; /* underlying (l, r) stream */
+} secp256k1_bulletproof_abgh_data;
+
+/* Inner-product-prover callback: even indices produce the next l entry
+ * (caching its paired r), odd indices replay the cached r. Always succeeds. */
+static int secp256k1_bulletproof_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_abgh_data *abgh = (secp256k1_bulletproof_abgh_data *) data;
+
+ (void) pt;
+ if (idx % 2 != 0) {
+ /* Odd index: return the r cached by the preceding even-index call. */
+ *sc = abgh->cache;
+ } else {
+ /* Even index: generate the next l, stashing its r for the next call. */
+ secp256k1_lr_generate(&abgh->lr_gen, sc, &abgh->cache, &abgh->x);
+ }
+ return 1;
+}
+
+/* Proof format: t, tau_x, mu, a, b, A, S, T_1, T_2, {L_i}, {R_i}
+ * 5 scalar + [4 + 2log(n)] ge
+ *
+ * The non-bold `h` in the Bulletproofs paper corresponds to our gens->blinding_gen
+ * while the non-bold `g` corresponds to the asset type `value_gen`.
+ */
+static int secp256k1_bulletproof_rangeproof_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *plen, const size_t nbits, const uint64_t *value, const uint64_t *min_value, const secp256k1_scalar *blind, const secp256k1_ge *commitp, size_t n_commits, const secp256k1_ge *value_gen, const secp256k1_bulletproof_generators *gens, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
+ /* Returns 1 and writes the serialized proof (updating *plen) on success,
+  * 0 on any validation or internal failure. All secret-dependent group
+  * operations below use the constant-time secp256k1_ecmult_const path. */
+ secp256k1_bulletproof_lr_generator lr_gen;
+ secp256k1_bulletproof_abgh_data abgh_data;
+ secp256k1_scalar zero;
+ secp256k1_sha256 sha256;
+ unsigned char commit[32] = {0}; /* running Fiat-Shamir transcript hash */
+ secp256k1_scalar alpha, rho; /* blinding factors for A and S */
+ secp256k1_scalar t0, t1, t2; /* coefficients of the polynomial t(x) */
+ secp256k1_scalar tau1, tau2, taux, mu;
+ secp256k1_scalar y;
+ secp256k1_scalar z, zsq;
+ secp256k1_scalar x, xsq;
+ secp256k1_scalar tmps;
+ secp256k1_gej aj, sj;
+ secp256k1_gej tmpj;
+ size_t i, j;
+ int overflow;
+ /* inner product proof variables */
+ secp256k1_ge out_pt[4];
+
+ /* nbits must be a power of two no larger than MAX_NBITS */
+ if (POPCOUNT(nbits) != 1 || nbits > MAX_NBITS) {
+ return 0;
+ }
+ /* each value must lie in [min_value, min_value + 2^nbits) */
+ for (i = 0; i < n_commits; i++) {
+ uint64_t mv = min_value == NULL ? 0 : min_value[i];
+ if (mv > value[i]) {
+ return 0;
+ }
+ if (nbits < 64 && (value[i] - mv) >= (1ull << nbits)) {
+ return 0;
+ }
+ }
+ if (*plen < 128 + 64 + 1) { /* inner product argument will check and assign plen */
+ return 0;
+ }
+
+ secp256k1_scalar_clear(&zero);
+
+ /* Commit to all input data: min value, pedersen commit, asset generator, extra_commit */
+ if (min_value != NULL) {
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ for (i = 0; i < n_commits; i++) {
+ unsigned char vbuf[8];
+ /* serialize min_value[i] little-endian */
+ vbuf[0] = min_value[i];
+ vbuf[1] = min_value[i] >> 8;
+ vbuf[2] = min_value[i] >> 16;
+ vbuf[3] = min_value[i] >> 24;
+ vbuf[4] = min_value[i] >> 32;
+ vbuf[5] = min_value[i] >> 40;
+ vbuf[6] = min_value[i] >> 48;
+ vbuf[7] = min_value[i] >> 56;
+ secp256k1_sha256_write(&sha256, vbuf, 8);
+ }
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+ for (i = 0; i < n_commits; i++) {
+ secp256k1_bulletproof_update_commit(commit, &commitp[i], value_gen); /* TODO be less stupid about this */
+ }
+ if (extra_commit != NULL) {
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len);
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+
+ /* Derive all secret scalars deterministically from the nonce. */
+ secp256k1_scalar_chacha20(&alpha, &rho, nonce, 0);
+ secp256k1_scalar_chacha20(&tau1, &tau2, nonce, 1);
+ /* Encrypt value into alpha, so it will be recoverable from -mu by someone who knows `nonce` */
+ if (n_commits == 1) {
+ secp256k1_scalar vals;
+ secp256k1_scalar_set_u64(&vals, value[0]);
+ secp256k1_scalar_negate(&vals, &vals); /* Negate so it'll be positive in -mu */
+ secp256k1_scalar_add(&alpha, &alpha, &vals);
+ }
+
+ /* Compute A and S */
+ secp256k1_ecmult_const(&aj, &gens->blinding_gen[0], &alpha, 256);
+ secp256k1_ecmult_const(&sj, &gens->blinding_gen[0], &rho, 256);
+ for (i = 0; i < n_commits; i++) {
+ for (j = 0; j < nbits; j++) {
+ secp256k1_scalar sl, sr;
+ uint64_t mv = min_value == NULL ? 0 : min_value[i];
+ size_t al = !!((value[i] - mv) & (1ull << j));
+ secp256k1_ge aterm = gens->gens[i * nbits + j + gens->n/2];
+ secp256k1_ge sterm;
+ secp256k1_gej stermj;
+
+ secp256k1_scalar_chacha20(&sl, &sr, nonce, i * nbits + j + 2);
+
+ /* A term: +G_i when bit al is set, -H_i when clear (constant-time select) */
+ secp256k1_ge_neg(&aterm, &aterm);
+ secp256k1_fe_cmov(&aterm.x, &gens->gens[i * nbits + j].x, al);
+ secp256k1_fe_cmov(&aterm.y, &gens->gens[i * nbits + j].y, al);
+
+ secp256k1_gej_add_ge(&aj, &aj, &aterm);
+
+ /* S term: sl*G_i + sr*H_i */
+ secp256k1_ecmult_const(&stermj, &gens->gens[i * nbits + j], &sl, 256);
+ secp256k1_ge_set_gej(&sterm, &stermj);
+ secp256k1_gej_add_ge(&sj, &sj, &sterm);
+ secp256k1_ecmult_const(&stermj, &gens->gens[i * nbits + j + gens->n/2], &sr, 256);
+ secp256k1_ge_set_gej(&sterm, &stermj);
+ secp256k1_gej_add_ge(&sj, &sj, &sterm);
+ }
+ }
+
+ /* get challenges y and z */
+ secp256k1_ge_set_gej(&out_pt[0], &aj);
+ secp256k1_ge_set_gej(&out_pt[1], &sj);
+
+ secp256k1_bulletproof_update_commit(commit, &out_pt[0], &out_pt[1]);
+ secp256k1_scalar_set_b32(&y, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&y)) {
+ return 0;
+ }
+ secp256k1_bulletproof_update_commit(commit, &out_pt[0], &out_pt[1]); /* TODO rehashing A and S to get a second challenge is overkill */
+ secp256k1_scalar_set_b32(&z, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&z)) {
+ return 0;
+ }
+ secp256k1_scalar_sqr(&zsq, &z);
+
+ /* Compute coefficients t0, t1, t2 of the polynomial */
+ /* t0 = l(0) dot r(0) */
+ secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits);
+ secp256k1_scalar_clear(&t0);
+ for (i = 0; i < nbits * n_commits; i++) {
+ secp256k1_scalar l, r;
+ secp256k1_lr_generate(&lr_gen, &l, &r, &zero);
+ secp256k1_scalar_mul(&l, &l, &r);
+ secp256k1_scalar_add(&t0, &t0, &l);
+ }
+
+ /* A = t0 + t1 + t2 = l(1) dot r(1) */
+ secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits);
+ secp256k1_scalar_clear(&t1);
+ for (i = 0; i < nbits * n_commits; i++) {
+ secp256k1_scalar one;
+ secp256k1_scalar l, r;
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_lr_generate(&lr_gen, &l, &r, &one);
+ secp256k1_scalar_mul(&l, &l, &r);
+ secp256k1_scalar_add(&t1, &t1, &l);
+ }
+
+ /* B = t0 - t1 + t2 = l(-1) dot r(-1) */
+ secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits);
+ secp256k1_scalar_clear(&t2);
+ for (i = 0; i < nbits * n_commits; i++) {
+ secp256k1_scalar negone;
+ secp256k1_scalar l, r;
+ secp256k1_scalar_set_int(&negone, 1);
+ secp256k1_scalar_negate(&negone, &negone);
+ secp256k1_lr_generate(&lr_gen, &l, &r, &negone);
+ secp256k1_scalar_mul(&l, &l, &r);
+ secp256k1_scalar_add(&t2, &t2, &l);
+ }
+
+ /* t1 = (A - B)/2 */
+ secp256k1_scalar_set_int(&tmps, 2);
+ secp256k1_scalar_inverse_var(&tmps, &tmps);
+ secp256k1_scalar_negate(&t2, &t2);
+ secp256k1_scalar_add(&t1, &t1, &t2);
+ secp256k1_scalar_mul(&t1, &t1, &tmps);
+
+ /* t2 = -(-B + t0) + t1 */
+ secp256k1_scalar_add(&t2, &t2, &t0);
+ secp256k1_scalar_negate(&t2, &t2);
+ secp256k1_scalar_add(&t2, &t2, &t1);
+
+ /* Compute Ti = t_i*A + tau_i*G for i = 1,2 */
+ /* TODO surely we can improve this */
+ secp256k1_ecmult_const(&tmpj, value_gen, &t1, 256);
+ secp256k1_ge_set_gej(&out_pt[2], &tmpj);
+ secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau1, 256);
+ secp256k1_gej_add_ge(&tmpj, &tmpj, &out_pt[2]);
+ secp256k1_ge_set_gej(&out_pt[2], &tmpj);
+
+ secp256k1_ecmult_const(&tmpj, value_gen, &t2, 256);
+ secp256k1_ge_set_gej(&out_pt[3], &tmpj);
+ secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau2, 256);
+ secp256k1_gej_add_ge(&tmpj, &tmpj, &out_pt[3]);
+ secp256k1_ge_set_gej(&out_pt[3], &tmpj);
+
+ /* get challenge x */
+ secp256k1_bulletproof_update_commit(commit, &out_pt[2], &out_pt[3]);
+ secp256k1_scalar_set_b32(&x, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&x)) {
+ return 0;
+ }
+ secp256k1_scalar_sqr(&xsq, &x);
+
+ /* compute tau_x and mu */
+ /* tau_x = tau1*x + tau2*x^2 + sum_i z^(2+i) * blind[i] */
+ secp256k1_scalar_mul(&taux, &tau1, &x);
+ secp256k1_scalar_mul(&tmps, &tau2, &xsq);
+ secp256k1_scalar_add(&taux, &taux, &tmps);
+ for (i = 0; i < n_commits; i++) {
+ secp256k1_scalar_mul(&tmps, &zsq, &blind[i]);
+ secp256k1_scalar_add(&taux, &taux, &tmps);
+ secp256k1_scalar_mul(&zsq, &zsq, &z); /* walk zsq up to z^(2+i+1); zsq is not reused below */
+ }
+
+ /* mu = rho*x + alpha */
+ secp256k1_scalar_mul(&mu, &rho, &x);
+ secp256k1_scalar_add(&mu, &mu, &alpha);
+
+ /* Negate taux and mu so the verifier doesn't have to */
+ secp256k1_scalar_negate(&taux, &taux);
+ secp256k1_scalar_negate(&mu, &mu);
+
+ /* Encode rangeproof stuff */
+ /* layout: taux (32) | mu (32) | A, S, T1, T2 compressed (128+1) | inner product proof */
+ secp256k1_scalar_get_b32(&proof[0], &taux);
+ secp256k1_scalar_get_b32(&proof[32], &mu);
+ secp256k1_bulletproof_serialize_points(&proof[64], out_pt, 4);
+
+ /* Mix this into the hash so the input to the inner product proof is fixed */
+ /* TODO is this necessary? revisit */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, proof, 64);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ /* Compute l and r, do inner product proof */
+ abgh_data.x = x;
+ secp256k1_lr_generator_init(&abgh_data.lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits);
+ *plen -= 64 + 128 + 1; /* reserve the rangeproof header; restored below */
+ secp256k1_scalar_inverse_var(&y, &y); /* the inner product prover expects y^-1 */
+ if (secp256k1_bulletproof_inner_product_prove_impl(ecmult_ctx, scratch, &proof[64 + 128 + 1], plen, gens, &y, nbits * n_commits, secp256k1_bulletproof_abgh_callback, (void *) &abgh_data, commit) == 0) {
+ return 0;
+ }
+ *plen += 64 + 128 + 1;
+
+ return 1;
+}
+
+/** Recover the committed value and blinding factor from a single-commitment
+ *  rangeproof, given the `nonce` used at proving time. The prover encodes
+ *  -value[0] into alpha (only when n_commits == 1), so replaying the nonce
+ *  derivation and the Fiat-Shamir transcript lets us solve -mu for the value
+ *  and tau_x for the blind. Returns 1 on success (value/blind written),
+ *  0 on any mismatch. */
+static int secp256k1_bulletproof_rangeproof_rewind_impl(uint64_t *value, secp256k1_scalar *blind, const unsigned char *proof, const size_t plen, uint64_t min_value, const secp256k1_pedersen_commitment *pcommit, const secp256k1_generator *value_gen, const secp256k1_ge *blind_gen, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
+ secp256k1_sha256 sha256;
+ static const unsigned char zero24[24] = { 0 };
+ unsigned char commit[32] = { 0 };
+ unsigned char lrparity;
+ secp256k1_scalar taux, mu;
+ secp256k1_scalar alpha, rho, tau1, tau2;
+ secp256k1_scalar x, z;
+ secp256k1_ge commitp, value_genp;
+ secp256k1_gej rewind_commitj;
+ int overflow;
+
+ if (plen < 64 + 128 + 1 || plen > SECP256K1_BULLETPROOF_MAX_PROOF) {
+ return 0;
+ }
+
+ /* Extract data from beginning of proof */
+ secp256k1_scalar_set_b32(&taux, &proof[0], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&taux)) {
+ return 0;
+ }
+ secp256k1_scalar_set_b32(&mu, &proof[32], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&mu)) {
+ return 0;
+ }
+
+ /* Re-derive the prover's secret scalars with the same chacha20 indices. */
+ secp256k1_scalar_chacha20(&alpha, &rho, nonce, 0);
+ secp256k1_scalar_chacha20(&tau1, &tau2, nonce, 1);
+
+ /* Replay the transcript: min_value is hashed only when nonzero.
+  * NOTE(review): the prover hashes min_value whenever the array is non-NULL,
+  * even if the entry is 0 — confirm a zero-valued min_value array cannot
+  * reach this path, otherwise the transcripts diverge and rewind fails. */
+ if (min_value > 0) {
+ unsigned char vbuf[8];
+ /* serialize min_value little-endian, matching the prover */
+ vbuf[0] = min_value;
+ vbuf[1] = min_value >> 8;
+ vbuf[2] = min_value >> 16;
+ vbuf[3] = min_value >> 24;
+ vbuf[4] = min_value >> 32;
+ vbuf[5] = min_value >> 40;
+ vbuf[6] = min_value >> 48;
+ vbuf[7] = min_value >> 56;
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, vbuf, 8);
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+
+ /* This breaks the abstraction of both the Pedersen commitment and the generator
+ * type by directly reading the parity bit and x-coordinate from the data. But
+ * the alternative using the _load functions is to do two full point decompression,
+ * and in my benchmarks we save ~80% of the rewinding time by avoiding this. -asp */
+ lrparity = 2 * !!(pcommit->data[0] & 1) + !!(value_gen->data[0] & 1);
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ secp256k1_sha256_write(&sha256, &pcommit->data[1], 32);
+ secp256k1_sha256_write(&sha256, &value_gen->data[1], 32);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ if (extra_commit != NULL) {
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len);
+ secp256k1_sha256_finalize(&sha256, commit);
+ }
+
+ /* Extract A and S to compute y and z */
+ /* low two bits of proof[64] are the parities of A and S */
+ lrparity = 2 * !!(proof[64] & 1) + !!(proof[64] & 2);
+ /* y */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ secp256k1_sha256_write(&sha256, &proof[65], 64);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ /* z */
+ /* A and S are hashed a second time, matching the prover's z derivation */
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ secp256k1_sha256_write(&sha256, &proof[65], 64);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ secp256k1_scalar_set_b32(&z, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&z)) {
+ return 0;
+ }
+
+ /* x */
+ /* low bits 2 and 3 of proof[64] are the parities of T1 and T2 */
+ lrparity = 2 * !!(proof[64] & 4) + !!(proof[64] & 8);
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ secp256k1_sha256_write(&sha256, &proof[129], 64);
+ secp256k1_sha256_finalize(&sha256, commit);
+
+ secp256k1_scalar_set_b32(&x, commit, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&x)) {
+ return 0;
+ }
+
+ /* Compute candidate mu and add to (negated) mu from proof to get value */
+ /* proof mu is -(rho*x + alpha + value), so the sum below leaves just the value */
+ secp256k1_scalar_mul(&rho, &rho, &x);
+ secp256k1_scalar_add(&mu, &mu, &rho);
+ secp256k1_scalar_add(&mu, &mu, &alpha);
+
+ secp256k1_scalar_get_b32(commit, &mu);
+ /* the recovered scalar must fit in 64 bits, i.e. the top 24 bytes are zero */
+ if (memcmp(commit, zero24, 24) != 0) {
+ return 0;
+ }
+ *value = commit[31] + ((uint64_t) commit[30] << 8) +
+ ((uint64_t) commit[29] << 16) + ((uint64_t) commit[28] << 24) +
+ ((uint64_t) commit[27] << 32) + ((uint64_t) commit[26] << 40) +
+ ((uint64_t) commit[25] << 48) + ((uint64_t) commit[24] << 56);
+
+ /* Derive blinding factor */
+ /* proof taux is -(tau1*x + tau2*x^2 + z^2*blind); adding the tau terms back
+  * leaves -z^2*blind, so blind = -(taux + tau1*x + tau2*x^2) / z^2 */
+ secp256k1_scalar_mul(&tau1, &tau1, &x);
+ secp256k1_scalar_mul(&tau2, &tau2, &x);
+ secp256k1_scalar_mul(&tau2, &tau2, &x);
+
+ secp256k1_scalar_add(&taux, &taux, &tau1);
+ secp256k1_scalar_add(&taux, &taux, &tau2);
+
+ secp256k1_scalar_sqr(&z, &z);
+ secp256k1_scalar_inverse_var(&z, &z);
+ secp256k1_scalar_mul(blind, &taux, &z);
+ secp256k1_scalar_negate(blind, blind);
+
+ /* Check blinding factor */
+ /* verify blind*H + value*G equals the supplied commitment before reporting success */
+ secp256k1_pedersen_commitment_load(&commitp, pcommit);
+ secp256k1_generator_load(&value_genp, value_gen);
+
+ secp256k1_pedersen_ecmult(&rewind_commitj, blind, *value, &value_genp, blind_gen);
+ secp256k1_gej_neg(&rewind_commitj, &rewind_commitj);
+ secp256k1_gej_add_ge_var(&rewind_commitj, &rewind_commitj, &commitp, NULL);
+
+ return secp256k1_gej_is_infinity(&rewind_commitj);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/tests_impl.h
new file mode 100644
index 0000000..8702e3d
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/tests_impl.h
@@ -0,0 +1,608 @@
+/**********************************************************************
+ * Copyright (c) 2018 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_BULLETPROOF_TESTS
+#define SECP256K1_MODULE_BULLETPROOF_TESTS
+
+#include
+
+#include "group.h"
+#include "scalar.h"
+#include "testrand.h"
+#include "util.h"
+
+#include "include/secp256k1_bulletproofs.h"
+
+/* Exercise the public bulletproofs API: argument validation (via the illegal
+ * callback counter `ecount`), successful prove/verify/verify_multi round
+ * trips, and rewind. Uses the test-global `ctx` only to create the scratch
+ * space; all calls go through purpose-built contexts. */
+static void test_bulletproof_api(void) {
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 1024 * 1024);
+ secp256k1_generator value_gen;
+ secp256k1_bulletproof_generators *gens;
+ secp256k1_pedersen_commitment pcommit[4];
+ const secp256k1_pedersen_commitment *pcommit_arr[1];
+ unsigned char proof[2000];
+ const unsigned char *proof_ptr = proof;
+ const unsigned char blind[32] = " i am not a blinding factor ";
+ const unsigned char *blind_ptr[4];
+ size_t blindlen = sizeof(blind);
+ size_t plen = sizeof(proof);
+ uint64_t value[4] = { 1234, 4567, 8910, 1112 } ;
+ uint64_t min_value[4] = { 1000, 4567, 0, 5000 } ;
+ const uint64_t *mv_ptr = min_value;
+ unsigned char rewind_blind[32];
+ uint64_t rewind_v; /* must be uint64_t: rewind writes through a uint64_t pointer */
+
+ int32_t ecount = 0;
+
+ blind_ptr[0] = blind;
+ blind_ptr[1] = blind;
+ blind_ptr[2] = blind;
+ blind_ptr[3] = blind;
+ pcommit_arr[0] = pcommit;
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ CHECK(secp256k1_generator_generate(both, &value_gen, blind) != 0);
+ CHECK(secp256k1_pedersen_commit(both, &pcommit[0], blind, value[0], &value_gen, &secp256k1_generator_const_h) != 0);
+ CHECK(secp256k1_pedersen_commit(both, &pcommit[1], blind, value[1], &value_gen, &secp256k1_generator_const_h) != 0);
+ CHECK(secp256k1_pedersen_commit(both, &pcommit[2], blind, value[2], &value_gen, &secp256k1_generator_const_h) != 0);
+ CHECK(secp256k1_pedersen_commit(both, &pcommit[3], blind, value[3], &value_gen, &secp256k1_generator_const_h) != 0);
+
+ /* generators */
+ gens = secp256k1_bulletproof_generators_create(none, NULL, 256);
+ CHECK(gens == NULL && ecount == 1);
+ gens = secp256k1_bulletproof_generators_create(none, &secp256k1_generator_const_h, 256);
+ CHECK(gens != NULL && ecount == 1);
+
+ /* rangeproof_prove */
+ ecount = 0;
+ CHECK(secp256k1_bulletproof_rangeproof_prove(none, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(sign, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(vrfy, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 1);
+ CHECK(ecount == 3);
+ plen = 2000;
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 2, &value_gen, 64, blind, NULL, 0) == 1);
+ CHECK(ecount == 3);
+ plen = 2000;
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 4, &value_gen, 64, blind, NULL, 0) == 0); /* too few gens */
+ CHECK(ecount == 4);
+
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, min_value, blind_ptr, 2, &value_gen, 64, blind, NULL, 0) == 1); /* mv = v, ok */
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, &value[1], &min_value[1], blind_ptr, 2, &value_gen, 64, blind, NULL, 0) == 1); /* mv = 0, ok */
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, &value[2], &min_value[2], blind_ptr, 2, &value_gen, 64, blind, NULL, 0) == 0); /* mv > v, !ok */
+ CHECK(ecount == 4);
+
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, NULL, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, NULL, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, NULL, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, NULL, value, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, blind_ptr, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, NULL, 1, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 0, &value_gen, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, NULL, 64, blind, NULL, 0) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 0, blind, NULL, 0) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 65, blind, NULL, 0) == 0);
+ CHECK(ecount == 14);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, -1, blind, NULL, 0) == 0);
+ CHECK(ecount == 15);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, NULL, NULL, 0) == 0);
+ CHECK(ecount == 16);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, NULL, blind_ptr, 1, &value_gen, 64, blind, blind, 0) == 1);
+ CHECK(ecount == 16);
+ CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, value, min_value, blind_ptr, 1, &value_gen, 64, blind, blind, 32) == 1);
+ CHECK(ecount == 16);
+
+ /* rangeproof_verify */
+ ecount = 0;
+ CHECK(secp256k1_bulletproof_rangeproof_verify(none, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(sign, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(vrfy, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 1);
+ CHECK(ecount == 2);
+
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen - 1, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, 0, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 31) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, NULL, 0) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 2, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 4, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 3);
+
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, NULL, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, NULL, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, NULL, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, NULL, pcommit, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, NULL, 1, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 0, 64, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 65, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 0, &value_gen, blind, 32) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, NULL, blind, 32) == 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, NULL, 32) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 0) == 0);
+ CHECK(ecount == 12);
+
+ /* verify_multi */
+ ecount = 0;
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(none, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(sign, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(vrfy, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 1);
+ CHECK(ecount == 2);
+
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, NULL, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, NULL, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, NULL, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 0, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, NULL, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, NULL, 1, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, NULL, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, NULL, &blindlen) == 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, NULL) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, NULL, NULL) == 0);
+ CHECK(ecount == 10);
+
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 0, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 65, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 63, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 0, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 2, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 4, 64, &value_gen, blind_ptr, &blindlen) == 0);
+ CHECK(ecount == 14);
+
+ /* Rewind */
+ ecount = 0;
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, NULL, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, NULL, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, NULL, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, NULL, plen, min_value[0], pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, 0, min_value[0], pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, 0, pcommit, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], NULL, &value_gen, blind, blind, 32) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, NULL, blind, blind, 32) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, NULL, blind, 32) == 0);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, NULL, 32) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 0) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_bulletproof_rangeproof_rewind(none, gens, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, NULL, 0) == 0);
+ CHECK(ecount == 8);
+
+ secp256k1_bulletproof_generators_destroy(none, gens);
+ secp256k1_bulletproof_generators_destroy(none, NULL);
+ secp256k1_scratch_destroy(scratch);
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+#define MAX_WIDTH (1ul << 20)
+/* Callback context for the manual multiexponentiation of P = <a,G> + <b,H>. */
+typedef struct {
+ const secp256k1_scalar *a; /* scalars paired with the G-side generators */
+ const secp256k1_scalar *b; /* scalars paired with the H-side generators */
+ const secp256k1_ge *g; /* first n generators */
+ const secp256k1_ge *h; /* second n generators */
+ size_t n; /* length of each of the four arrays above */
+} test_bulletproof_ecmult_context;
+
+/* ecmult_multi callback: term idx is a[idx]*g[idx] for idx < n, and
+ * b[idx-n]*h[idx-n] for n <= idx < 2n. Always returns 1 (success). */
+static int test_bulletproof_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ test_bulletproof_ecmult_context *ecctx = (test_bulletproof_ecmult_context *) data;
+ if (idx < ecctx->n) {
+ *sc = ecctx->a[idx];
+ *pt = ecctx->g[idx];
+ } else {
+ /* caller must never ask for more than 2n terms */
+ VERIFY_CHECK(idx < 2*ecctx->n);
+ *sc = ecctx->b[idx - ecctx->n];
+ *pt = ecctx->h[idx - ecctx->n];
+ }
+ return 1;
+}
+
+/* Callback context used by the verifier-side rangeproof callback below. */
+typedef struct {
+ secp256k1_scalar offs; /* scratch; set to 1 on every callback invocation */
+ secp256k1_scalar ext_sc; /* scalar for the extra "external" point */
+ secp256k1_scalar skew_sc; /* per-basis skew multiplier (0 = no skew) */
+ secp256k1_ge ext_pt; /* extra point mixed into the verification */
+ secp256k1_ge p; /* the commitment point P */
+ size_t n; /* vector length of the inner product */
+ int parity; /* alternates each call; selects ext_pt vs p */
+} test_bulletproof_offset_context;
+
+/* Verifier-side rangeproof callback: for the first 2n indices returns
+ * idx * skew_sc (a per-basis skew, with *pt left untouched by this branch);
+ * past that it alternately returns (ext_sc, ext_pt) and (1, P), toggling
+ * `parity` each call. Every returned scalar is multiplied by the batch
+ * randomizer. Always returns 1. */
+static int test_bulletproof_offset_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data) {
+ test_bulletproof_offset_context *ecctx = (test_bulletproof_offset_context *) data;
+ secp256k1_scalar_set_int(&ecctx->offs, 1);
+ if (idx < 2 * ecctx->n) {
+ secp256k1_scalar idxsc;
+ secp256k1_scalar_set_int(&idxsc, idx);
+ secp256k1_scalar_mul(sc, &ecctx->skew_sc, &idxsc);
+ } else {
+ if (ecctx->parity) {
+ *sc = ecctx->ext_sc;
+ *pt = ecctx->ext_pt;
+ } else {
+ secp256k1_scalar_set_int(sc, 1);
+ *pt = ecctx->p;
+ }
+ }
+ secp256k1_scalar_mul(sc, sc, randomizer);
+ ecctx->parity = !ecctx->parity;
+ return 1;
+}
+
+/* Prover-side callback context: the two scalar vectors of the inner product. */
+typedef struct {
+ const secp256k1_scalar *a_arr; /* left vector, length n */
+ const secp256k1_scalar *b_arr; /* right vector, length n */
+} secp256k1_bulletproof_ip_test_abgh_data;
+
+
+/* Prover callback: interleaves the a and b vectors, a[i] at even indices
+ * and b[i] at odd ones. The point output is unused by the prover. */
+static int secp256k1_bulletproof_ip_test_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+ secp256k1_bulletproof_ip_test_abgh_data *cbctx = (secp256k1_bulletproof_ip_test_abgh_data *) data;
+ const int is_g = idx % 2 == 0;
+
+ (void) pt;
+ if (is_g) {
+ *sc = cbctx->a_arr[idx / 2];
+ } else {
+ *sc = cbctx->b_arr[idx / 2];
+ }
+ return 1;
+}
+
+/* End-to-end test of the inner-product argument of width n: proves
+ * knowledge of vectors a, b committed in P, then verifies the proof with
+ * several kinds of verifier-side offsets (P offset on the blinding
+ * generator, an external point, per-basis skews) and finally batch-verifies
+ * the same proof twice. */
+void test_bulletproof_inner_product(size_t n, const secp256k1_bulletproof_generators *gens) {
+ const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0,0,0,0,0,0,0,0);
+ secp256k1_gej pj;
+ secp256k1_gej tmpj, tmpj2;
+ secp256k1_scalar *a_arr = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, n * sizeof(*a_arr));
+ secp256k1_scalar *b_arr = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, n * sizeof(*b_arr));
+ unsigned char commit[32] = "hash of P, c, etc. all that jazz";
+ secp256k1_scalar one;
+ size_t j;
+ test_bulletproof_offset_context offs_ctx;
+ secp256k1_bulletproof_ip_test_abgh_data abgh_data;
+ secp256k1_bulletproof_innerproduct_context innp_ctx;
+ unsigned char proof[2000];
+ size_t plen = sizeof(proof);
+
+ secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 100000 + 256 * (2 * n + 2));
+
+ for (j = 0; j < n; j++) {
+ random_scalar_order(&a_arr[j]);
+ random_scalar_order(&b_arr[j]);
+ }
+
+ abgh_data.a_arr = a_arr;
+ abgh_data.b_arr = b_arr;
+
+ random_group_element_test(&offs_ctx.ext_pt);
+ random_scalar_order(&offs_ctx.ext_sc);
+ /* skew_sc = 0: basis skews are a no-op until enabled further down */
+ secp256k1_scalar_clear(&offs_ctx.skew_sc);
+ offs_ctx.n = n;
+
+ secp256k1_scalar_set_int(&one, 1);
+ CHECK(secp256k1_bulletproof_inner_product_prove_impl(&ctx->ecmult_ctx, scratch, proof, &plen, gens, &one, n, secp256k1_bulletproof_ip_test_abgh_callback, (void *) &abgh_data, commit) == 1);
+
+ innp_ctx.proof = proof;
+ memcpy(innp_ctx.commit, commit, 32);
+ secp256k1_scalar_set_int(&innp_ctx.yinv, 1);
+ innp_ctx.n_extra_rangeproof_points = 1;
+ innp_ctx.rangeproof_cb = test_bulletproof_offset_vfy_callback;
+ innp_ctx.rangeproof_cb_data = (void *) &offs_ctx;
+
+ /* Manually do the multiexp to obtain the point P which commits to the inner product.
+ * The prover never computes this because it is implicit in the range/circuit proofs. */
+ {
+ test_bulletproof_ecmult_context ecmult_data;
+ ecmult_data.n = n;
+ ecmult_data.a = a_arr;
+ ecmult_data.b = b_arr;
+ ecmult_data.g = gens->gens;
+ ecmult_data.h = gens->gens + gens->n/2;
+ CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &pj, &zero, test_bulletproof_ecmult_callback, (void*) &ecmult_data, 2 * n));
+ secp256k1_ge_set_gej(&offs_ctx.p, &pj);
+ }
+
+ /* Check proof with no offsets or other baubles */
+ offs_ctx.parity = 0;
+ secp256k1_scalar_clear(&innp_ctx.p_offs);
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+
+ /* skew P by a random amount and instruct the verifier to offset it */
+ random_scalar_order(&innp_ctx.p_offs);
+ secp256k1_gej_set_ge(&tmpj2, &gens->blinding_gen[0]);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmpj, &tmpj2, &innp_ctx.p_offs, &zero);
+ secp256k1_gej_add_var(&pj, &pj, &tmpj, NULL);
+ secp256k1_ge_set_gej(&offs_ctx.p, &pj);
+
+ /* wrong p_offs should fail */
+ offs_ctx.parity = 0;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 0);
+
+ /* negating p_offs cancels the skew that was added to P above */
+ secp256k1_scalar_negate(&innp_ctx.p_offs, &innp_ctx.p_offs);
+
+ offs_ctx.parity = 0;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+ /* check that verification did not trash anything */
+ offs_ctx.parity = 0;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+ /* check that adding a no-op rangeproof skew function doesn't break anything */
+ offs_ctx.parity = 0;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+
+ /* Offset P by some random point and then try to undo this in the verification */
+ secp256k1_gej_set_ge(&tmpj2, &offs_ctx.ext_pt);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmpj, &tmpj2, &offs_ctx.ext_sc, &zero);
+ secp256k1_gej_neg(&tmpj, &tmpj);
+ secp256k1_gej_add_ge_var(&tmpj, &tmpj, &offs_ctx.p, NULL);
+ secp256k1_ge_set_gej(&offs_ctx.p, &tmpj);
+ offs_ctx.parity = 0;
+ innp_ctx.n_extra_rangeproof_points = 2;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+
+ /* Offset each basis by some random point and try to undo this in the verification */
+ secp256k1_gej_set_infinity(&tmpj2);
+ for (j = 0; j < n; j++) {
+ size_t k;
+ /* Offset by k-times the kth G basis and (k+n)-times the kth H basis */
+ for (k = 0; k < j; k++) {
+ secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j], NULL);
+ secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j + gens->n/2], NULL);
+ }
+ for (k = 0; k < n; k++) {
+ secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j + gens->n/2], NULL);
+ }
+ }
+ random_scalar_order(&offs_ctx.skew_sc);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmpj, &tmpj2, &offs_ctx.skew_sc, &zero);
+ secp256k1_gej_add_ge_var(&tmpj, &tmpj, &offs_ctx.p, NULL);
+ secp256k1_ge_set_gej(&offs_ctx.p, &tmpj);
+ secp256k1_scalar_negate(&offs_ctx.skew_sc, &offs_ctx.skew_sc);
+
+ offs_ctx.parity = 0;
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1);
+
+ /* Try to validate the same proof twice */
+{
+ test_bulletproof_offset_context offs_ctxs[2];
+ secp256k1_bulletproof_innerproduct_context innp_ctxs[2];
+ offs_ctx.parity = 1; /* set parity to 1 so the common point will be returned first, as required by the multi-proof verifier */
+ memcpy(&innp_ctxs[0], &innp_ctx, sizeof(innp_ctx));
+ memcpy(&innp_ctxs[1], &innp_ctx, sizeof(innp_ctx));
+ memcpy(&offs_ctxs[0], &offs_ctx, sizeof(offs_ctx));
+ memcpy(&offs_ctxs[1], &offs_ctx, sizeof(offs_ctx));
+ innp_ctxs[0].rangeproof_cb_data = (void *)&offs_ctxs[0];
+ innp_ctxs[1].rangeproof_cb_data = (void *)&offs_ctxs[1];
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, innp_ctxs, 2, plen, 1) == 1);
+ CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->ecmult_ctx, scratch, gens, n, innp_ctxs, 2, plen, 0) == 1);
+}
+
+ free(a_arr);
+ free(b_arr);
+ secp256k1_scratch_destroy(scratch);
+}
+
+/* Proves a single value in [0, 2^nbits), checks the serialized proof has
+ * the expected size, verifies proofs singly and in batches (including a
+ * batch mixing two asset generators), and exercises rewind with both the
+ * correct and a corrupted nonce. */
+void test_bulletproof_rangeproof(size_t nbits, size_t expected_size, const secp256k1_bulletproof_generators *gens) {
+ secp256k1_scalar blind;
+ secp256k1_scalar blind_recovered;
+ unsigned char proof[1024];
+ unsigned char proof2[1024];
+ unsigned char proof3[1024];
+ const unsigned char *proof_ptr[3];
+ size_t plen = sizeof(proof);
+ uint64_t v = 123456;
+ uint64_t v_recovered;
+ secp256k1_gej commitj;
+ secp256k1_ge commitp;
+ secp256k1_ge commitp2;
+ secp256k1_pedersen_commitment pcommit;
+ const secp256k1_ge *commitp_ptr[3];
+ secp256k1_ge value_gen[3];
+ unsigned char nonce[32] = "my kingdom for some randomness!!";
+
+ secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 10000000);
+
+ /* clamp the test value into range for small nbits */
+ if (v >> nbits > 0) {
+ v = 0;
+ }
+
+ proof_ptr[0] = proof;
+ proof_ptr[1] = proof2;
+ proof_ptr[2] = proof3;
+
+ /* third proof uses a different value generator (asset type) */
+ secp256k1_generator_load(&value_gen[0], &secp256k1_generator_const_g);
+ secp256k1_generator_load(&value_gen[1], &secp256k1_generator_const_g);
+ secp256k1_generator_load(&value_gen[2], &secp256k1_generator_const_h);
+ random_scalar_order(&blind);
+
+ secp256k1_pedersen_ecmult(&commitj, &blind, v, &value_gen[0], &gens->blinding_gen[0]);
+ secp256k1_ge_set_gej(&commitp, &commitj);
+ secp256k1_pedersen_ecmult(&commitj, &blind, v, &value_gen[2], &gens->blinding_gen[0]);
+ secp256k1_ge_set_gej(&commitp2, &commitj);
+ commitp_ptr[0] = commitp_ptr[1] = &commitp;
+ commitp_ptr[2] = &commitp2;
+ secp256k1_pedersen_commitment_save(&pcommit, &commitp);
+
+ CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->ecmult_ctx, scratch, proof, &plen, nbits, &v, NULL, &blind, &commitp, 1, &value_gen[0], gens, nonce, NULL, 0) == 1);
+ CHECK(plen == expected_size);
+ nonce[0] ^= 1;
+ CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->ecmult_ctx, scratch, proof2, &plen, nbits, &v, NULL, &blind, &commitp, 1, &value_gen[1], gens, nonce, NULL, 0) == 1);
+ CHECK(plen == expected_size);
+ nonce[0] ^= 2;
+ CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->ecmult_ctx, scratch, proof3, &plen, nbits, &v, NULL, &blind, &commitp2, 1, &value_gen[2], gens, nonce, NULL, 0) == 1);
+ CHECK(plen == expected_size);
+ /* 1 ^ 2 ^ 3 == 0, so this restores the original nonce for the rewind below */
+ nonce[0] ^= 3;
+ /* Verify once */
+ CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, proof_ptr, 1, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1);
+ /* Verify twice at once to test batch validation */
+ CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, proof_ptr, 2, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1);
+ /* Verify thrice at once where one has a different asset type */
+ CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, proof_ptr, 3, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1);
+
+ /* Rewind */
+ CHECK(secp256k1_bulletproof_rangeproof_rewind_impl(&v_recovered, &blind_recovered, proof, plen, 0, &pcommit, &secp256k1_generator_const_g, gens->blinding_gen, nonce, NULL, 0) == 1);
+ CHECK(v_recovered == v);
+ CHECK(secp256k1_scalar_eq(&blind_recovered, &blind) == 1);
+
+ /* rewinding with the wrong nonce must fail */
+ nonce[0] ^= 111;
+ CHECK(secp256k1_bulletproof_rangeproof_rewind_impl(&v_recovered, &blind_recovered, proof, plen, 0, &pcommit, &secp256k1_generator_const_g, gens->blinding_gen, nonce, NULL, 0) == 0);
+
+ secp256k1_scratch_destroy(scratch);
+}
+
+/* Proves n_commits values in a single aggregated rangeproof, checks the
+ * serialized size, and verifies the aggregate proof. */
+void test_bulletproof_rangeproof_aggregate(size_t nbits, size_t n_commits, size_t expected_size, const secp256k1_bulletproof_generators *gens) {
+ unsigned char proof[1024];
+ const unsigned char *proof_ptr = proof;
+ size_t plen = sizeof(proof);
+ secp256k1_scalar *blind = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*blind));
+ uint64_t *v = (uint64_t *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*v));
+ secp256k1_ge *commitp = (secp256k1_ge *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*commitp));
+ const secp256k1_ge *constptr = commitp;
+ secp256k1_ge value_gen;
+ unsigned char commit[32] = {0};
+ unsigned char nonce[32] = "mary, mary quite contrary how do";
+ size_t i;
+
+ secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 10000000);
+
+ secp256k1_generator_load(&value_gen, &secp256k1_generator_const_g);
+ for (i = 0; i < n_commits; i++) {
+ secp256k1_scalar vs;
+ secp256k1_gej commitj;
+
+ v[i] = 223 * i; /* dice-roll random # */
+ /* clamp each value into [0, 2^nbits) */
+ if (v[i] >> nbits > 0) {
+ v[i] = 0;
+ }
+ secp256k1_scalar_set_u64(&vs, v[i]);
+ random_scalar_order(&blind[i]);
+ secp256k1_pedersen_ecmult(&commitj, &blind[i], v[i], &value_gen, &gens->blinding_gen[0]);
+ secp256k1_ge_set_gej(&commitp[i], &commitj);
+
+ secp256k1_bulletproof_update_commit(commit, &commitp[i], &value_gen);
+ }
+
+ CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->ecmult_ctx, scratch, proof, &plen, nbits, v, NULL, blind, commitp, n_commits, &value_gen, gens, nonce, NULL, 0) == 1);
+ CHECK(plen == expected_size);
+ CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->ecmult_ctx, scratch, &proof_ptr, 1, plen, nbits, NULL, &constptr, n_commits, &value_gen, gens, NULL, 0) == 1);
+
+ secp256k1_scratch_destroy(scratch);
+ free(commitp);
+ free(v);
+ free(blind);
+}
+
+/* Top-level bulletproofs test driver: API checks, proof-length sanity
+ * checks, inner-product tests at assorted widths, and single/aggregated
+ * rangeproof tests with their expected serialized sizes. */
+void run_bulletproofs_tests(void) {
+ size_t i;
+
+ /* Make a ton of generators */
+ secp256k1_bulletproof_generators *gens = secp256k1_bulletproof_generators_create(ctx, &secp256k1_generator_const_h, 32768);
+ test_bulletproof_api();
+
+ /* sanity checks */
+ CHECK(secp256k1_bulletproof_innerproduct_proof_length(0) == 32); /* encoding of 1 */
+ CHECK(secp256k1_bulletproof_innerproduct_proof_length(1) == 96); /* encoding a*b, a, b */
+ CHECK(secp256k1_bulletproof_innerproduct_proof_length(2) == 160); /* dot prod, a, b, L, R, parity of L, R */
+ CHECK(secp256k1_bulletproof_innerproduct_proof_length(4) == 225); /* dot prod, a, b, a, b, L, R, parity of L, R */
+ CHECK(secp256k1_bulletproof_innerproduct_proof_length(8) == 289); /* dot prod, a, b, a, b, L, R, L, R, parity of L, R */
+
+ test_bulletproof_inner_product(0, gens);
+ test_bulletproof_inner_product(1, gens);
+ test_bulletproof_inner_product(2, gens);
+ test_bulletproof_inner_product(4, gens);
+ test_bulletproof_inner_product(8, gens);
+ for (i = 0; i < (size_t) count; i++) {
+ test_bulletproof_inner_product(32, gens);
+ test_bulletproof_inner_product(64, gens);
+ }
+ test_bulletproof_inner_product(1024, gens);
+
+ test_bulletproof_rangeproof(1, 289, gens);
+ test_bulletproof_rangeproof(2, 353, gens);
+ test_bulletproof_rangeproof(16, 546, gens);
+ test_bulletproof_rangeproof(32, 610, gens);
+ test_bulletproof_rangeproof(64, 675, gens);
+
+ test_bulletproof_rangeproof_aggregate(64, 1, 675, gens);
+ test_bulletproof_rangeproof_aggregate(8, 2, 546, gens);
+ test_bulletproof_rangeproof_aggregate(8, 4, 610, gens);
+
+ secp256k1_bulletproof_generators_destroy(ctx, gens);
+}
+#undef MAX_WIDTH
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/util.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/util.h
new file mode 100644
index 0000000..b1b4d11
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/bulletproofs/util.h
@@ -0,0 +1,116 @@
+/**********************************************************************
+ * Copyright (c) 2018 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_BULLETPROOF_UTIL
+#define SECP256K1_MODULE_BULLETPROOF_UTIL
+
+/* floor(log2(n)), with the convention that 0 maps to 0; only used to
+ * estimate proof sizes, so exactness at 0 is irrelevant. */
+SECP256K1_INLINE static size_t secp256k1_floor_lg(size_t n) {
+ size_t lg = 0;
+ /* repeatedly halve until a single leading bit remains */
+ while (n > 1) {
+ n >>= 1;
+ lg++;
+ }
+ return lg;
+}
+
+/* r = sum over i of a[i] * b[i], the inner product of two scalar vectors
+ * of length n (accumulated from the last element down to the first). */
+static void secp256k1_scalar_dot_product(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, size_t n) {
+ size_t i;
+ secp256k1_scalar_clear(r);
+ for (i = n; i-- > 0; ) {
+ secp256k1_scalar product;
+ secp256k1_scalar_mul(&product, &a[i], &b[i]);
+ secp256k1_scalar_add(r, r, &product);
+ }
+}
+
+/* Batch inversion (Montgomery's trick): computes r[i] = 1/a[i] for all i
+ * using a single scalar inversion plus 3(len-1) multiplications.
+ * Forward pass builds prefix products in r, then the inverse of the total
+ * product is unwound backwards. r and a must not overlap. Variable time. */
+static void secp256k1_scalar_inverse_all_var(secp256k1_scalar *r, const secp256k1_scalar *a, size_t len) {
+ secp256k1_scalar u;
+ size_t i;
+ if (len < 1) {
+ return;
+ }
+
+ /* overlap would corrupt the prefix products */
+ VERIFY_CHECK((r + len <= a) || (a + len <= r));
+
+ r[0] = a[0];
+
+ /* r[i] = a[0] * a[1] * ... * a[i] */
+ i = 0;
+ while (++i < len) {
+ secp256k1_scalar_mul(&r[i], &r[i - 1], &a[i]);
+ }
+
+ /* u = 1 / (a[0] * ... * a[len-1]) */
+ secp256k1_scalar_inverse_var(&u, &r[--i]);
+
+ /* peel one factor off u per step: r[j] = prefix(j-1) * u = 1/a[j] */
+ while (i > 0) {
+ size_t j = i--;
+ secp256k1_scalar_mul(&r[j], &r[i], &u);
+ secp256k1_scalar_mul(&u, &u, &a[j]);
+ }
+
+ r[0] = u;
+}
+
+/* Serialize n points compactly: a ceil(n/8)-byte bit vector of Y "sign"
+ * bits (bit i set when pt[i].y is NOT a quadratic residue) followed by
+ * the 32-byte big-endian X coordinate of each point. */
+SECP256K1_INLINE static void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n) {
+ const size_t bitveclen = (n + 7) / 8;
+ size_t i;
+
+ memset(out, 0, bitveclen);
+ for (i = 0; i < n; i++) {
+ secp256k1_fe pointx;
+ pointx = pt[i].x;
+ secp256k1_fe_normalize(&pointx);
+ secp256k1_fe_get_b32(&out[bitveclen + i*32], &pointx);
+ if (!secp256k1_fe_is_quad_var(&pt[i].y)) {
+ out[i/8] |= (1ull << (i % 8));
+ }
+ }
+}
+
+/* Deserialize the i-th of n points from the format produced by
+ * secp256k1_bulletproof_serialize_points: read X at its fixed offset past
+ * the bit vector, lift to the quadratic-residue Y, then negate if the
+ * point's sign bit is set. */
+SECP256K1_INLINE static void secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n) {
+ const size_t bitveclen = (n + 7) / 8;
+ const size_t offset = bitveclen + i*32;
+ secp256k1_fe fe;
+
+ secp256k1_fe_set_b32(&fe, &data[offset]);
+ secp256k1_ge_set_xquad(pt, &fe);
+ if (data[i / 8] & (1 << (i % 8))) {
+ secp256k1_ge_neg(pt, pt);
+ }
+}
+
+/* Fiat-Shamir transcript update:
+ * commit <- SHA256(commit || lrparity || lpt.x || rpt.x)
+ * where lrparity packs the quadratic-residue flags of the two Y
+ * coordinates into one byte. The 32-byte commit buffer doubles as
+ * scratch for the serialized X coordinates before being overwritten
+ * with the final hash. */
+static void secp256k1_bulletproof_update_commit(unsigned char *commit, const secp256k1_ge *lpt, const secp256k1_ge *rpt) {
+ secp256k1_fe pointx;
+ secp256k1_sha256 sha256;
+ unsigned char lrparity;
+ lrparity = (!secp256k1_fe_is_quad_var(&lpt->y) << 1) + !secp256k1_fe_is_quad_var(&rpt->y);
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_write(&sha256, &lrparity, 1);
+ pointx = lpt->x;
+ secp256k1_fe_normalize(&pointx);
+ secp256k1_fe_get_b32(commit, &pointx);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ pointx = rpt->x;
+ secp256k1_fe_normalize(&pointx);
+ secp256k1_fe_get_b32(commit, &pointx);
+ secp256k1_sha256_write(&sha256, commit, 32);
+ secp256k1_sha256_finalize(&sha256, commit);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/.main_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/.main_impl.h.swp
new file mode 100644
index 0000000..047e28c
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/.main_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/Makefile.am.include
new file mode 100644
index 0000000..132d6fe
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/Makefile.am.include
@@ -0,0 +1,4 @@
+include_HEADERS += include/secp256k1_commitment.h
+noinst_HEADERS += src/modules/commitment/main_impl.h
+noinst_HEADERS += src/modules/commitment/pedersen_impl.h
+noinst_HEADERS += src/modules/commitment/tests_impl.h
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/main_impl.h
new file mode 100644
index 0000000..ffaec06
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/main_impl.h
@@ -0,0 +1,186 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_COMMITMENT_MAIN
+#define SECP256K1_MODULE_COMMITMENT_MAIN
+
+#include "group.h"
+
+#include "modules/commitment/pedersen_impl.h"
+
+/* Decompress a 33-byte commitment into a group element: bytes 1..32 are
+ * the X coordinate, and the low bit of byte 0 selects the non-quadratic-
+ * residue Y (mirroring what commitment_save stores there). */
+static void secp256k1_pedersen_commitment_load(secp256k1_ge* ge, const secp256k1_pedersen_commitment* commit) {
+ secp256k1_fe fe;
+ secp256k1_fe_set_b32(&fe, &commit->data[1]);
+ secp256k1_ge_set_xquad(ge, &fe);
+ if (commit->data[0] & 1) {
+ secp256k1_ge_neg(ge, ge);
+ }
+}
+
+/* Compress a group element into the 33-byte commitment form: byte 0 is
+ * 0x08 or 0x09 (9 XOR the quadratic-residue flag of Y encodes Y's sign),
+ * bytes 1..32 are the normalized X coordinate. Normalizes ge->x. */
+static void secp256k1_pedersen_commitment_save(secp256k1_pedersen_commitment* commit, secp256k1_ge* ge) {
+ secp256k1_fe_normalize(&ge->x);
+ secp256k1_fe_get_b32(&commit->data[1], &ge->x);
+ commit->data[0] = 9 ^ secp256k1_fe_is_quad_var(&ge->y);
+}
+
+/* Parse a 33-byte serialized commitment. Only prefix bytes 0x08/0x09 are
+ * accepted (the & 0xFE mask ignores the sign bit); the X coordinate is
+ * NOT validated as a curve point here. Returns 1 on success, 0 on a bad
+ * prefix. */
+int secp256k1_pedersen_commitment_parse(const secp256k1_context* ctx, secp256k1_pedersen_commitment* commit, const unsigned char *input) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(input != NULL);
+ (void) ctx;
+ if ((input[0] & 0xFE) != 8) {
+ return 0;
+ }
+ memcpy(commit->data, input, sizeof(commit->data));
+ return 1;
+}
+
+/* Copy a commitment's internal 33-byte form to the output buffer.
+ * Always returns 1 (after the argument checks). */
+int secp256k1_pedersen_commitment_serialize(const secp256k1_context* ctx, unsigned char *output, const secp256k1_pedersen_commitment* commit) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(commit != NULL);
+ memcpy(output, commit->data, sizeof(commit->data));
+ return 1;
+}
+
+/* Generates a Pedersen commitment: *commit = blind * blind_gen + value * value_gen.
+ * The blinding factor is a 32-byte big-endian scalar. Returns 0 (without
+ * touching *commit) if the blinding factor overflows the group order or
+ * the resulting point is infinity; secret intermediates are wiped before
+ * returning. */
+int secp256k1_pedersen_commit(const secp256k1_context* ctx, secp256k1_pedersen_commitment *commit, const unsigned char *blind, uint64_t value, const secp256k1_generator* value_gen, const secp256k1_generator* blind_gen) {
+ secp256k1_ge value_genp;
+ secp256k1_ge blind_genp;
+ secp256k1_gej rj;
+ secp256k1_ge r;
+ secp256k1_scalar sec;
+ int overflow;
+ int ret = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(blind != NULL);
+ ARG_CHECK(value_gen != NULL);
+ ARG_CHECK(blind_gen != NULL);
+ secp256k1_generator_load(&value_genp, value_gen);
+ secp256k1_generator_load(&blind_genp, blind_gen);
+ secp256k1_scalar_set_b32(&sec, blind, &overflow);
+ if (!overflow) {
+ secp256k1_pedersen_ecmult(&rj, &sec, value, &value_genp, &blind_genp);
+ if (!secp256k1_gej_is_infinity(&rj)) {
+ secp256k1_ge_set_gej(&r, &rj);
+ secp256k1_pedersen_commitment_save(commit, &r);
+ ret = 1;
+ }
+ /* wipe point intermediates that depend on the secret blind */
+ secp256k1_gej_clear(&rj);
+ secp256k1_ge_clear(&r);
+ }
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+/** Takes a list of n pointers to 32-byte blinding values; the first
+ * npositive are added with positive sign and the remainder subtracted.
+ * Writes the resulting sum (a blinding value that makes the whole set
+ * balance) to blind_out. Returns 0 if any input overflows the group
+ * order, 1 otherwise.
+ */
+int secp256k1_pedersen_blind_sum(const secp256k1_context* ctx, unsigned char *blind_out, const unsigned char * const *blinds, size_t n, size_t npositive) {
+ secp256k1_scalar acc;
+ secp256k1_scalar x;
+ size_t i;
+ int overflow;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(blind_out != NULL);
+ ARG_CHECK(blinds != NULL);
+ ARG_CHECK(npositive <= n);
+ (void) ctx;
+ secp256k1_scalar_set_int(&acc, 0);
+ for (i = 0; i < n; i++) {
+ secp256k1_scalar_set_b32(&x, blinds[i], &overflow);
+ if (overflow) {
+ return 0;
+ }
+ /* inputs past npositive are subtracted */
+ if (i >= npositive) {
+ secp256k1_scalar_negate(&x, &x);
+ }
+ secp256k1_scalar_add(&acc, &acc, &x);
+ }
+ secp256k1_scalar_get_b32(blind_out, &acc);
+ secp256k1_scalar_clear(&acc);
+ secp256k1_scalar_clear(&x);
+ return 1;
+}
+
+/* Takes two lists of commitments, sums each, and verifies that the
+ * positive set minus the negative set is the point at infinity, i.e.
+ * that the commitments balance. Returns 1 if they balance, 0 otherwise.
+ *
+ * BUG FIX: the original loops iterated n_pos times over neg[] and n_neg
+ * times over pos[], reading out of bounds whenever the two list lengths
+ * differed. Each array is now walked with its own length. */
+int secp256k1_pedersen_verify_tally(const secp256k1_context* ctx, const secp256k1_pedersen_commitment * const* pos, size_t n_pos, const secp256k1_pedersen_commitment * const* neg, size_t n_neg) {
+ secp256k1_gej accj;
+ secp256k1_ge add;
+ size_t i;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(!n_pos || (pos != NULL));
+ ARG_CHECK(!n_neg || (neg != NULL));
+ (void) ctx;
+ /* accj = -(sum of negative-side commitments) */
+ secp256k1_gej_set_infinity(&accj);
+ for (i = 0; i < n_neg; i++) {
+ secp256k1_pedersen_commitment_load(&add, neg[i]);
+ secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL);
+ }
+ secp256k1_gej_neg(&accj, &accj);
+ /* accj += sum of positive-side commitments */
+ for (i = 0; i < n_pos; i++) {
+ secp256k1_pedersen_commitment_load(&add, pos[i]);
+ secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL);
+ }
+ return secp256k1_gej_is_infinity(&accj);
+}
+
+/* Given per-output values, generator blinds r and Pedersen blinds r',
+ * computes sum over all entries of +/-(v*r + r') (negated for the first
+ * n_inputs entries) and rewrites the LAST Pedersen blinding factor so
+ * the whole set of commitments balances. Returns 0 if any blind
+ * overflows the group order, 1 on success. Secrets are wiped on every
+ * exit path. */
+int secp256k1_pedersen_blind_generator_blind_sum(const secp256k1_context* ctx, const uint64_t *value, const unsigned char* const* generator_blind, unsigned char* const* blinding_factor, size_t n_total, size_t n_inputs) {
+ secp256k1_scalar sum;
+ secp256k1_scalar tmp;
+ size_t i;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(n_total == 0 || value != NULL);
+ ARG_CHECK(n_total == 0 || generator_blind != NULL);
+ ARG_CHECK(n_total == 0 || blinding_factor != NULL);
+ ARG_CHECK(n_total > n_inputs);
+ (void) ctx;
+
+ /* NOTE(review): unreachable in practice -- ARG_CHECK(n_total > n_inputs)
+ * above already rejects n_total == 0. Kept for defensive symmetry. */
+ if (n_total == 0) {
+ return 1;
+ }
+
+ secp256k1_scalar_set_int(&sum, 0);
+ for (i = 0; i < n_total; i++) {
+ int overflow = 0;
+ secp256k1_scalar addend;
+ secp256k1_scalar_set_u64(&addend, value[i]); /* s = v */
+
+ secp256k1_scalar_set_b32(&tmp, generator_blind[i], &overflow);
+ if (overflow == 1) {
+ secp256k1_scalar_clear(&tmp);
+ secp256k1_scalar_clear(&addend);
+ secp256k1_scalar_clear(&sum);
+ return 0;
+ }
+ secp256k1_scalar_mul(&addend, &addend, &tmp); /* s = vr */
+
+ secp256k1_scalar_set_b32(&tmp, blinding_factor[i], &overflow);
+ if (overflow == 1) {
+ secp256k1_scalar_clear(&tmp);
+ secp256k1_scalar_clear(&addend);
+ secp256k1_scalar_clear(&sum);
+ return 0;
+ }
+ secp256k1_scalar_add(&addend, &addend, &tmp); /* s = vr + r' */
+ secp256k1_scalar_cond_negate(&addend, i < n_inputs); /* s is negated if it's an input */
+ secp256k1_scalar_add(&sum, &sum, &addend); /* sum += s */
+ secp256k1_scalar_clear(&addend);
+ }
+
+ /* Right now tmp has the last pedersen blinding factor. Subtract the sum from it. */
+ secp256k1_scalar_negate(&sum, &sum);
+ secp256k1_scalar_add(&tmp, &tmp, &sum);
+ secp256k1_scalar_get_b32(blinding_factor[n_total - 1], &tmp);
+
+ secp256k1_scalar_clear(&tmp);
+ secp256k1_scalar_clear(&sum);
+ return 1;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/pedersen_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/pedersen_impl.h
new file mode 100644
index 0000000..a6d9dfb
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/pedersen_impl.h
@@ -0,0 +1,38 @@
+/***********************************************************************
+ * Copyright (c) 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php. *
+ ***********************************************************************/
+
+#ifndef SECP256K1_MODULE_COMMITMENT_PEDERSEN
+#define SECP256K1_MODULE_COMMITMENT_PEDERSEN
+
+#include <string.h>
+
+#include "ecmult_const.h"
+#include "group.h"
+#include "scalar.h"
+
+/* rj = sec * blind_gen + value * value_gen, using constant-time scalar
+ * multiplications (64-bit window for the value, 256-bit for the blind).
+ * Secret-dependent intermediates are wiped before returning. */
+SECP256K1_INLINE static void secp256k1_pedersen_ecmult(secp256k1_gej *rj, const secp256k1_scalar *sec, uint64_t value, const secp256k1_ge* value_gen, const secp256k1_ge* blind_gen) {
+ secp256k1_scalar vs;
+ secp256k1_gej bj;
+ secp256k1_ge bp;
+
+ secp256k1_scalar_set_u64(&vs, value);
+ secp256k1_ecmult_const(rj, value_gen, &vs, 64);
+ secp256k1_ecmult_const(&bj, blind_gen, sec, 256);
+
+ /* zero blinding factor indicates that we are not trying to be zero-knowledge,
+ * so not being constant-time in this case is OK. */
+ if (!secp256k1_gej_is_infinity(&bj)) {
+ secp256k1_ge_set_gej(&bp, &bj);
+ secp256k1_gej_add_ge(rj, rj, &bp);
+ }
+
+ secp256k1_gej_clear(&bj);
+ secp256k1_ge_clear(&bp);
+ secp256k1_scalar_clear(&vs);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/tests_impl.h
new file mode 100644
index 0000000..fe4f925
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/commitment/tests_impl.h
@@ -0,0 +1,230 @@
+/**********************************************************************
+ * Copyright (c) 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_COMMITMENT_TESTS
+#define SECP256K1_MODULE_COMMITMENT_TESTS
+
+#include <string.h>
+
+#include "group.h"
+#include "scalar.h"
+#include "testrand.h"
+#include "util.h"
+
+#include "include/secp256k1_commitment.h"
+
+static void test_commitment_api(void) {
+ secp256k1_pedersen_commitment commit;
+ const secp256k1_pedersen_commitment *commit_ptr = &commit;
+ unsigned char blind[32];
+ unsigned char blind_out[32];
+ const unsigned char *blind_ptr = blind;
+ unsigned char *blind_out_ptr = blind_out;
+ uint64_t val = secp256k1_rand32();
+
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ int32_t ecount;
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ secp256k1_rand256(blind);
+ CHECK(secp256k1_pedersen_commit(none, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_pedersen_commit(vrfy, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) != 0);
+ CHECK(ecount == 2);
+
+ CHECK(secp256k1_pedersen_commit(sign, NULL, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_pedersen_commit(sign, &commit, NULL, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, NULL, &secp256k1_generator_const_g) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, NULL) == 0);
+ CHECK(ecount == 6);
+
+ CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 1, 1) != 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_pedersen_blind_sum(none, NULL, &blind_ptr, 1, 1) == 0);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_pedersen_blind_sum(none, blind_out, NULL, 1, 1) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 0, 1) == 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 0, 0) != 0);
+ CHECK(ecount == 9);
+
+ CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) != 0);
+ CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, &commit_ptr, 1) != 0);
+ CHECK(secp256k1_pedersen_verify_tally(none, NULL, 0, &commit_ptr, 1) == 0);
+ CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, NULL, 0) == 0);
+ CHECK(secp256k1_pedersen_verify_tally(none, NULL, 0, NULL, 0) != 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_pedersen_verify_tally(none, NULL, 1, &commit_ptr, 1) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, NULL, 1) == 0);
+ CHECK(ecount == 11);
+
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 1, 0) != 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 1, 1) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 0, 0) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, NULL, &blind_ptr, &blind_out_ptr, 1, 0) == 0);
+ CHECK(ecount == 14);
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, NULL, &blind_out_ptr, 1, 0) == 0);
+ CHECK(ecount == 15);
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, NULL, 1, 0) == 0);
+ CHECK(ecount == 16);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+static void test_pedersen(void) {
+ secp256k1_pedersen_commitment commits[19];
+ const secp256k1_pedersen_commitment *cptr[19];
+ unsigned char blinds[32*19];
+ const unsigned char *bptr[19];
+ secp256k1_scalar s;
+ uint64_t values[19];
+ int64_t totalv;
+ int i;
+ int inputs;
+ int outputs;
+ int total;
+ inputs = (secp256k1_rand32() & 7) + 1;
+ outputs = (secp256k1_rand32() & 7) + 2;
+ total = inputs + outputs;
+ for (i = 0; i < 19; i++) {
+ cptr[i] = &commits[i];
+ bptr[i] = &blinds[i * 32];
+ }
+ totalv = 0;
+ for (i = 0; i < inputs; i++) {
+ values[i] = secp256k1_rands64(0, INT64_MAX - totalv);
+ totalv += values[i];
+ }
+ for (i = 0; i < outputs - 1; i++) {
+ values[i + inputs] = secp256k1_rands64(0, totalv);
+ totalv -= values[i + inputs];
+ }
+ values[total - 1] = totalv;
+
+ for (i = 0; i < total - 1; i++) {
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(&blinds[i * 32], &s);
+ }
+ CHECK(secp256k1_pedersen_blind_sum(ctx, &blinds[(total - 1) * 32], bptr, total - 1, inputs));
+ for (i = 0; i < total; i++) {
+ CHECK(secp256k1_pedersen_commit(ctx, &commits[i], &blinds[i * 32], values[i], &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ }
+ CHECK(secp256k1_pedersen_verify_tally(ctx, cptr, inputs, &cptr[inputs], outputs));
+ CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[inputs], outputs, cptr, inputs));
+ if (inputs > 0 && values[0] > 0) {
+ CHECK(!secp256k1_pedersen_verify_tally(ctx, cptr, inputs - 1, &cptr[inputs], outputs));
+ }
+ random_scalar_order(&s);
+ for (i = 0; i < 4; i++) {
+ secp256k1_scalar_get_b32(&blinds[i * 32], &s);
+ }
+ values[0] = INT64_MAX;
+ values[1] = 0;
+ values[2] = 1;
+ for (i = 0; i < 3; i++) {
+ CHECK(secp256k1_pedersen_commit(ctx, &commits[i], &blinds[i * 32], values[i], &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ }
+ CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[0], 1, &cptr[0], 1));
+ CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[1], 1, &cptr[1], 1));
+}
+
+#define MAX_N_GENS 30
+void test_multiple_generators(void) {
+ const size_t n_inputs = (secp256k1_rand32() % (MAX_N_GENS / 2)) + 1;
+ const size_t n_outputs = (secp256k1_rand32() % (MAX_N_GENS / 2)) + 1;
+ const size_t n_generators = n_inputs + n_outputs;
+ unsigned char *generator_blind[MAX_N_GENS];
+ unsigned char *pedersen_blind[MAX_N_GENS];
+ secp256k1_generator generator[MAX_N_GENS];
+ secp256k1_pedersen_commitment commit[MAX_N_GENS];
+ const secp256k1_pedersen_commitment *commit_ptr[MAX_N_GENS];
+ size_t i;
+ int64_t total_value;
+ uint64_t value[MAX_N_GENS];
+
+ secp256k1_scalar s;
+
+ unsigned char generator_seed[32];
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(generator_seed, &s);
+ /* Create all the needed generators */
+ for (i = 0; i < n_generators; i++) {
+ generator_blind[i] = (unsigned char*) malloc(32);
+ pedersen_blind[i] = (unsigned char*) malloc(32);
+
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(generator_blind[i], &s);
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(pedersen_blind[i], &s);
+
+ CHECK(secp256k1_generator_generate_blinded(ctx, &generator[i], generator_seed, generator_blind[i]));
+
+ commit_ptr[i] = &commit[i];
+ }
+
+ /* Compute all the values -- can be positive or negative */
+ total_value = 0;
+ for (i = 0; i < n_outputs; i++) {
+ value[n_inputs + i] = secp256k1_rands64(0, INT64_MAX - total_value);
+ total_value += value[n_inputs + i];
+ }
+ for (i = 0; i < n_inputs - 1; i++) {
+ value[i] = secp256k1_rands64(0, total_value);
+ total_value -= value[i];
+ }
+ value[i] = total_value;
+
+ /* Correct for blinding factors and do the commitments */
+ CHECK(secp256k1_pedersen_blind_generator_blind_sum(ctx, value, (const unsigned char * const *) generator_blind, pedersen_blind, n_generators, n_inputs));
+ for (i = 0; i < n_generators; i++) {
+ CHECK(secp256k1_pedersen_commit(ctx, &commit[i], pedersen_blind[i], value[i], &generator[i], &secp256k1_generator_const_h));
+ }
+
+ /* Verify */
+ CHECK(secp256k1_pedersen_verify_tally(ctx, &commit_ptr[0], n_inputs, &commit_ptr[n_inputs], n_outputs));
+
+ /* Cleanup */
+ for (i = 0; i < n_generators; i++) {
+ free(generator_blind[i]);
+ free(pedersen_blind[i]);
+ }
+}
+#undef MAX_N_GENS
+
+void run_commitment_tests(void) {
+ int i;
+ test_commitment_api();
+ for (i = 0; i < 10*count; i++) {
+ test_pedersen();
+ }
+ test_multiple_generators();
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
new file mode 100644
index 0000000..e3088b4
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
@@ -0,0 +1,8 @@
+include_HEADERS += include/secp256k1_ecdh.h
+noinst_HEADERS += src/modules/ecdh/main_impl.h
+noinst_HEADERS += src/modules/ecdh/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_ecdh
+bench_ecdh_SOURCES = src/bench_ecdh.c
+bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
new file mode 100644
index 0000000..df3ec5c
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
@@ -0,0 +1,54 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_ECDH_MAIN_H
+#define SECP256K1_MODULE_ECDH_MAIN_H
+
+#include "include/secp256k1_ecdh.h"
+#include "ecmult_const_impl.h"
+
+int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const secp256k1_pubkey *point, const unsigned char *scalar) {
+ int ret = 0;
+ int overflow = 0;
+ secp256k1_gej res;
+ secp256k1_ge pt;
+ secp256k1_scalar s;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(result != NULL);
+ ARG_CHECK(point != NULL);
+ ARG_CHECK(scalar != NULL);
+
+ secp256k1_pubkey_load(ctx, &pt, point);
+ secp256k1_scalar_set_b32(&s, scalar, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&s)) {
+ ret = 0;
+ } else {
+ unsigned char x[32];
+ unsigned char y[1];
+ secp256k1_sha256 sha;
+
+ secp256k1_ecmult_const(&res, &pt, &s, 256);
+ secp256k1_ge_set_gej(&pt, &res);
+ /* Compute a hash of the point in compressed form
+ * Note we cannot use secp256k1_eckey_pubkey_serialize here since it does not
+ * expect its output to be secret and has a timing sidechannel. */
+ secp256k1_fe_normalize(&pt.x);
+ secp256k1_fe_normalize(&pt.y);
+ secp256k1_fe_get_b32(x, &pt.x);
+ y[0] = 0x02 | secp256k1_fe_is_odd(&pt.y);
+
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, y, sizeof(y));
+ secp256k1_sha256_write(&sha, x, sizeof(x));
+ secp256k1_sha256_finalize(&sha, result);
+ ret = 1;
+ }
+
+ secp256k1_scalar_clear(&s);
+ return ret;
+}
+
+#endif /* SECP256K1_MODULE_ECDH_MAIN_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
new file mode 100644
index 0000000..0c53f8e
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
@@ -0,0 +1,105 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_ECDH_TESTS_H
+#define SECP256K1_MODULE_ECDH_TESTS_H
+
+void test_ecdh_api(void) {
+ /* Setup context that just counts errors */
+ secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_pubkey point;
+ unsigned char res[32];
+ unsigned char s_one[32] = { 0 };
+ int32_t ecount = 0;
+ s_one[31] = 1;
+
+ secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
+ CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1);
+
+ /* Check all NULLs are detected */
+ CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdh(tctx, res, NULL, s_one) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdh(tctx, res, &point, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1);
+ CHECK(ecount == 3);
+
+ /* Cleanup */
+ secp256k1_context_destroy(tctx);
+}
+
+void test_ecdh_generator_basepoint(void) {
+ unsigned char s_one[32] = { 0 };
+ secp256k1_pubkey point[2];
+ int i;
+
+ s_one[31] = 1;
+ /* Check against pubkey creation when the basepoint is the generator */
+ for (i = 0; i < 100; ++i) {
+ secp256k1_sha256 sha;
+ unsigned char s_b32[32];
+ unsigned char output_ecdh[32];
+ unsigned char output_ser[32];
+ unsigned char point_ser[33];
+ size_t point_ser_len = sizeof(point_ser);
+ secp256k1_scalar s;
+
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(s_b32, &s);
+
+ /* compute using ECDH function */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1);
+ CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32) == 1);
+ /* compute "explicitly" */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(point_ser_len == sizeof(point_ser));
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, point_ser, point_ser_len);
+ secp256k1_sha256_finalize(&sha, output_ser);
+ /* compare */
+ CHECK(memcmp(output_ecdh, output_ser, sizeof(output_ser)) == 0);
+ }
+}
+
+void test_bad_scalar(void) {
+ unsigned char s_zero[32] = { 0 };
+ unsigned char s_overflow[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41
+ };
+ unsigned char s_rand[32] = { 0 };
+ unsigned char output[32];
+ secp256k1_scalar rand;
+ secp256k1_pubkey point;
+
+ /* Create random point */
+ random_scalar_order(&rand);
+ secp256k1_scalar_get_b32(s_rand, &rand);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1);
+
+ /* Try to multiply it by bad values */
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_zero) == 0);
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 0);
+ /* ...and a good one */
+ s_overflow[31] -= 1;
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 1);
+}
+
+void run_ecdh_tests(void) {
+ test_ecdh_api();
+ test_ecdh_generator_basepoint();
+ test_bad_scalar();
+}
+
+#endif /* SECP256K1_MODULE_ECDH_TESTS_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.main_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.main_impl.h.swp
new file mode 100644
index 0000000..85d9619
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.main_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.tests_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.tests_impl.h.swp
new file mode 100644
index 0000000..82b7474
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/.tests_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/Makefile.am.include
new file mode 100644
index 0000000..69933e9
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/Makefile.am.include
@@ -0,0 +1,9 @@
+include_HEADERS += include/secp256k1_generator.h
+noinst_HEADERS += src/modules/generator/main_impl.h
+noinst_HEADERS += src/modules/generator/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_generator
+bench_generator_SOURCES = src/bench_generator.c
+bench_generator_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_generator_LDFLAGS = -static
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/main_impl.h
new file mode 100644
index 0000000..2c226da
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/main_impl.h
@@ -0,0 +1,232 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra & Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_GENERATOR_MAIN
+#define SECP256K1_MODULE_GENERATOR_MAIN
+
+#include <string.h>
+
+#include "field.h"
+#include "group.h"
+#include "hash.h"
+#include "scalar.h"
+
+/** Standard secp256k1 generator */
+const secp256k1_generator secp256k1_generator_const_g = {
+ { 0x0a,
+ 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac,
+ 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
+ 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9,
+ 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98
+ }
+};
+
+/** Alternate secp256k1 generator, used in Elements Alpha.
+ * Computed as the hash of the above G, DER-encoded with 0x04 (uncompressed pubkey) as its flag byte.
+ * import hashlib
+ * C = EllipticCurve ([F (0), F (7)])
+ * G_bytes = '0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'.decode('hex')
+ * H = C.lift_x(int(hashlib.sha256(G_bytes).hexdigest(),16))
+ */
+const secp256k1_generator secp256k1_generator_const_h = {
+ { 0x0b,
+ 0x50, 0x92, 0x9b, 0x74, 0xc1, 0xa0, 0x49, 0x54,
+ 0xb7, 0x8b, 0x4b, 0x60, 0x35, 0xe9, 0x7a, 0x5e,
+ 0x07, 0x8a, 0x5a, 0x0f, 0x28, 0xec, 0x96, 0xd5,
+ 0x47, 0xbf, 0xee, 0x9a, 0xce, 0x80, 0x3a, 0xc0
+ }
+};
+
+static void secp256k1_generator_load(secp256k1_ge* ge, const secp256k1_generator* gen) {
+ secp256k1_fe fe;
+ secp256k1_fe_set_b32(&fe, &gen->data[1]);
+ secp256k1_ge_set_xquad(ge, &fe);
+ if (gen->data[0] & 1) {
+ secp256k1_ge_neg(ge, ge);
+ }
+}
+
+static void secp256k1_generator_save(secp256k1_generator* commit, secp256k1_ge* ge) {
+ secp256k1_fe_normalize(&ge->x);
+ secp256k1_fe_get_b32(&commit->data[1], &ge->x);
+ commit->data[0] = 11 ^ secp256k1_fe_is_quad_var(&ge->y);
+}
+
+int secp256k1_generator_parse(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *input) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(input != NULL);
+ if ((input[0] & 0xFE) != 10) {
+ return 0;
+ }
+ memcpy(gen->data, input, sizeof(gen->data));
+ return 1;
+}
+
+int secp256k1_generator_serialize(const secp256k1_context* ctx, unsigned char *output, const secp256k1_generator* gen) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(gen != NULL);
+ memcpy(output, gen->data, sizeof(gen->data));
+ return 1;
+}
+
+static void shallue_van_de_woestijne(secp256k1_ge* ge, const secp256k1_fe* t) {
+ /* Implements the algorithm from:
+ * Indifferentiable Hashing to Barreto-Naehrig Curves
+ * Pierre-Alain Fouque and Mehdi Tibouchi
+ * Latincrypt 2012
+ */
+
+ /* Basic algorithm:
+
+ c = sqrt(-3)
+ d = (c - 1)/2
+
+ w = c * t / (1 + b + t^2) [with b = 7]
+ x1 = d - t*w
+ x2 = -(x1 + 1)
+ x3 = 1 + 1/w^2
+
+ To avoid the 2 divisions, compute the above in numerator/denominator form:
+ wn = c * t
+ wd = 1 + 7 + t^2
+ x1n = d*wd - t*wn
+ x1d = wd
+ x2n = -(x1n + wd)
+ x2d = wd
+ x3n = wd^2 + c^2 + t^2
+ x3d = (c * t)^2
+
+ The joint denominator j = wd * c^2 * t^2, and
+ 1 / x1d = 1/j * c^2 * t^2
+ 1 / x2d = x3d = 1/j * wd
+ */
+
+ static const secp256k1_fe c = SECP256K1_FE_CONST(0x0a2d2ba9, 0x3507f1df, 0x233770c2, 0xa797962c, 0xc61f6d15, 0xda14ecd4, 0x7d8d27ae, 0x1cd5f852);
+ static const secp256k1_fe d = SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa40);
+ static const secp256k1_fe b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
+ static const secp256k1_fe b_plus_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 8);
+
+ secp256k1_fe wn, wd, x1n, x2n, x3n, x3d, jinv, tmp, x1, x2, x3, alphain, betain, gammain, y1, y2, y3;
+ int alphaquad, betaquad;
+
+ secp256k1_fe_mul(&wn, &c, t); /* mag 1 */
+ secp256k1_fe_sqr(&wd, t); /* mag 1 */
+ secp256k1_fe_add(&wd, &b_plus_one); /* mag 2 */
+ secp256k1_fe_mul(&tmp, t, &wn); /* mag 1 */
+ secp256k1_fe_negate(&tmp, &tmp, 1); /* mag 2 */
+ secp256k1_fe_mul(&x1n, &d, &wd); /* mag 1 */
+ secp256k1_fe_add(&x1n, &tmp); /* mag 3 */
+ x2n = x1n; /* mag 3 */
+ secp256k1_fe_add(&x2n, &wd); /* mag 5 */
+ secp256k1_fe_negate(&x2n, &x2n, 5); /* mag 6 */
+ secp256k1_fe_mul(&x3d, &c, t); /* mag 1 */
+ secp256k1_fe_sqr(&x3d, &x3d); /* mag 1 */
+ secp256k1_fe_sqr(&x3n, &wd); /* mag 1 */
+ secp256k1_fe_add(&x3n, &x3d); /* mag 2 */
+ secp256k1_fe_mul(&jinv, &x3d, &wd); /* mag 1 */
+ secp256k1_fe_inv(&jinv, &jinv); /* mag 1 */
+ secp256k1_fe_mul(&x1, &x1n, &x3d); /* mag 1 */
+ secp256k1_fe_mul(&x1, &x1, &jinv); /* mag 1 */
+ secp256k1_fe_mul(&x2, &x2n, &x3d); /* mag 1 */
+ secp256k1_fe_mul(&x2, &x2, &jinv); /* mag 1 */
+ secp256k1_fe_mul(&x3, &x3n, &wd); /* mag 1 */
+ secp256k1_fe_mul(&x3, &x3, &jinv); /* mag 1 */
+
+ secp256k1_fe_sqr(&alphain, &x1); /* mag 1 */
+ secp256k1_fe_mul(&alphain, &alphain, &x1); /* mag 1 */
+ secp256k1_fe_add(&alphain, &b); /* mag 2 */
+ secp256k1_fe_sqr(&betain, &x2); /* mag 1 */
+ secp256k1_fe_mul(&betain, &betain, &x2); /* mag 1 */
+ secp256k1_fe_add(&betain, &b); /* mag 2 */
+ secp256k1_fe_sqr(&gammain, &x3); /* mag 1 */
+ secp256k1_fe_mul(&gammain, &gammain, &x3); /* mag 1 */
+ secp256k1_fe_add(&gammain, &b); /* mag 2 */
+
+ alphaquad = secp256k1_fe_sqrt(&y1, &alphain);
+ betaquad = secp256k1_fe_sqrt(&y2, &betain);
+ secp256k1_fe_sqrt(&y3, &gammain);
+
+ secp256k1_fe_cmov(&x1, &x2, (!alphaquad) & betaquad);
+ secp256k1_fe_cmov(&y1, &y2, (!alphaquad) & betaquad);
+ secp256k1_fe_cmov(&x1, &x3, (!alphaquad) & !betaquad);
+ secp256k1_fe_cmov(&y1, &y3, (!alphaquad) & !betaquad);
+
+ secp256k1_ge_set_xy(ge, &x1, &y1);
+
+ /* The linked algorithm from the paper uses the Jacobi symbol of t to
+ * determine the Jacobi symbol of the produced y coordinate. Since the
+ * rest of the algorithm only uses t^2, we can safely use another criterion
+ * as long as negation of t results in negation of the y coordinate. Here
+ * we choose to use t's oddness, as it is faster to determine. */
+ secp256k1_fe_negate(&tmp, &ge->y, 1);
+ secp256k1_fe_cmov(&ge->y, &tmp, secp256k1_fe_is_odd(t));
+}
+
+static int secp256k1_generator_generate_internal(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32, const unsigned char *blind32) {
+ static const unsigned char prefix1[17] = "1st generation: ";
+ static const unsigned char prefix2[17] = "2nd generation: ";
+ secp256k1_fe t = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 4);
+ secp256k1_ge add;
+ secp256k1_gej accum;
+ int overflow;
+ secp256k1_sha256 sha256;
+ unsigned char b32[32];
+ int ret = 1;
+
+ if (blind32) {
+ secp256k1_scalar blind;
+ secp256k1_scalar_set_b32(&blind, blind32, &overflow);
+ ret = !overflow;
+ CHECK(ret);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &accum, &blind);
+ }
+
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, prefix1, 16);
+ secp256k1_sha256_write(&sha256, key32, 32);
+ secp256k1_sha256_finalize(&sha256, b32);
+ ret &= secp256k1_fe_set_b32(&t, b32);
+ CHECK(ret);
+ shallue_van_de_woestijne(&add, &t);
+ if (blind32) {
+ secp256k1_gej_add_ge(&accum, &accum, &add);
+ } else {
+ secp256k1_gej_set_ge(&accum, &add);
+ }
+
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, prefix2, 16);
+ secp256k1_sha256_write(&sha256, key32, 32);
+ secp256k1_sha256_finalize(&sha256, b32);
+ ret &= secp256k1_fe_set_b32(&t, b32);
+ CHECK(ret);
+ shallue_van_de_woestijne(&add, &t);
+ secp256k1_gej_add_ge(&accum, &accum, &add);
+
+ secp256k1_ge_set_gej(&add, &accum);
+ secp256k1_generator_save(gen, &add);
+ return ret;
+}
+
+int secp256k1_generator_generate(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(key32 != NULL);
+ return secp256k1_generator_generate_internal(ctx, gen, key32, NULL);
+}
+
+int secp256k1_generator_generate_blinded(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32, const unsigned char *blind32) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(key32 != NULL);
+ ARG_CHECK(blind32 != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ return secp256k1_generator_generate_internal(ctx, gen, key32, blind32);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/tests_impl.h
new file mode 100644
index 0000000..8b1a5ac
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/generator/tests_impl.h
@@ -0,0 +1,199 @@
+/**********************************************************************
+ * Copyright (c) 2016 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_GENERATOR_TESTS
+#define SECP256K1_MODULE_GENERATOR_TESTS
+
+#include <string.h>
+#include <stdio.h>
+
+#include "group.h"
+#include "scalar.h"
+#include "testrand.h"
+#include "util.h"
+
+#include "include/secp256k1_generator.h"
+
+/* API surface test: exercises generate / generate_blinded / serialize / parse
+ * with valid and invalid arguments, counting illegal-argument callback hits
+ * in ecount to confirm each invalid call is rejected exactly once. */
+void test_generator_api(void) {
+ unsigned char key[32];
+ unsigned char blind[32];
+ unsigned char sergen[33];
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_generator gen;
+ int32_t ecount = 0;
+
+ /* Route both error and illegal-argument callbacks of all three contexts to
+  * a shared counter so every failing call below increments ecount. */
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_rand256(key);
+ secp256k1_rand256(blind);
+
+ /* Unblinded generation works on any context; NULL output or key must fail. */
+ CHECK(secp256k1_generator_generate(none, &gen, key) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_generator_generate(none, NULL, key) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_generator_generate(none, &gen, NULL) == 0);
+ CHECK(ecount == 2);
+
+ /* Blinded generation needs ecmult_gen capability: only the signing context
+  * succeeds; verify-only and capability-less contexts are rejected. */
+ CHECK(secp256k1_generator_generate_blinded(sign, &gen, key, blind) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, key, blind) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_generator_generate_blinded(none, &gen, key, blind) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_generator_generate_blinded(vrfy, NULL, key, blind) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, NULL, blind) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, key, NULL) == 0);
+ CHECK(ecount == 7);
+
+ /* Serialization argument checks. */
+ CHECK(secp256k1_generator_serialize(none, sergen, &gen) == 1);
+ CHECK(ecount == 7);
+ CHECK(secp256k1_generator_serialize(none, NULL, &gen) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_generator_serialize(none, sergen, NULL) == 0);
+ CHECK(ecount == 9);
+
+ /* Serialize/parse round trip, plus parse argument checks. */
+ CHECK(secp256k1_generator_serialize(none, sergen, &gen) == 1);
+ CHECK(secp256k1_generator_parse(none, &gen, sergen) == 1);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_generator_parse(none, NULL, sergen) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_generator_parse(none, &gen, NULL) == 0);
+ CHECK(ecount == 11);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+}
+
+/* Fixed-vector test for the Shallue-van de Woestijne map-to-curve: checks the
+ * mapped point for field inputs +1..+16 and their negations against constants
+ * produced by the shallue_van_de_woestijne.sage reference program. */
+void test_shallue_van_de_woestijne(void) {
+ /* Matches with the output of the shallue_van_de_woestijne.sage SAGE program */
+ static const secp256k1_ge_storage results[32] = {
+ SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0x0225f529, 0xee75acaf, 0xccfc4560, 0x26c5e46b, 0xf80237a3, 0x3924655a, 0x16f90e88, 0x085ed52a),
+ SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0xfdda0ad6, 0x118a5350, 0x3303ba9f, 0xd93a1b94, 0x07fdc85c, 0xc6db9aa5, 0xe906f176, 0xf7a12705),
+ SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0x56716069, 0x6818286b, 0x72f01a3e, 0x5e8caca7, 0x36249160, 0xc7ded69d, 0xd51913c3, 0x03a2fa97),
+ SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0xa98e9f96, 0x97e7d794, 0x8d0fe5c1, 0xa1735358, 0xc9db6e9f, 0x38212962, 0x2ae6ec3b, 0xfc5d0198),
+ SECP256K1_GE_STORAGE_CONST(0x531f7239, 0xaebc780e, 0x179fbf8d, 0x412a1b01, 0x511f0abc, 0xe0c46151, 0x8b38db84, 0xcc2467f3, 0x82387d45, 0xec7bd5cc, 0x61fcb9df, 0x41cddd7b, 0x217d8114, 0x3577dc8f, 0x23de356a, 0x7e97704e),
+ SECP256K1_GE_STORAGE_CONST(0x531f7239, 0xaebc780e, 0x179fbf8d, 0x412a1b01, 0x511f0abc, 0xe0c46151, 0x8b38db84, 0xcc2467f3, 0x7dc782ba, 0x13842a33, 0x9e034620, 0xbe322284, 0xde827eeb, 0xca882370, 0xdc21ca94, 0x81688be1),
+ SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0x56716069, 0x6818286b, 0x72f01a3e, 0x5e8caca7, 0x36249160, 0xc7ded69d, 0xd51913c3, 0x03a2fa97),
+ SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0xa98e9f96, 0x97e7d794, 0x8d0fe5c1, 0xa1735358, 0xc9db6e9f, 0x38212962, 0x2ae6ec3b, 0xfc5d0198),
+ SECP256K1_GE_STORAGE_CONST(0x5e5936b1, 0x81db0b65, 0x8e33a8c6, 0x1aa687dd, 0x31d11e15, 0x85e35664, 0x6b4c2071, 0xcde7e942, 0x88bb5332, 0xa8e05654, 0x78d4f60c, 0x0cd979ec, 0x938558f2, 0xcac11216, 0x7c387a56, 0xe3a6d5f3),
+ SECP256K1_GE_STORAGE_CONST(0x5e5936b1, 0x81db0b65, 0x8e33a8c6, 0x1aa687dd, 0x31d11e15, 0x85e35664, 0x6b4c2071, 0xcde7e942, 0x7744accd, 0x571fa9ab, 0x872b09f3, 0xf3268613, 0x6c7aa70d, 0x353eede9, 0x83c785a8, 0x1c59263c),
+ SECP256K1_GE_STORAGE_CONST(0x657d438f, 0xfac34a50, 0x463fd07c, 0x3f09f320, 0x4c98e8ed, 0x6927e330, 0xc0c7735f, 0x76d32f6d, 0x577c2b11, 0xcaca2f6f, 0xd60bcaf0, 0x3e7cebe9, 0x5da6e1f4, 0xbb557f12, 0x2a397331, 0x81df897f),
+ SECP256K1_GE_STORAGE_CONST(0x657d438f, 0xfac34a50, 0x463fd07c, 0x3f09f320, 0x4c98e8ed, 0x6927e330, 0xc0c7735f, 0x76d32f6d, 0xa883d4ee, 0x3535d090, 0x29f4350f, 0xc1831416, 0xa2591e0b, 0x44aa80ed, 0xd5c68ccd, 0x7e2072b0),
+ SECP256K1_GE_STORAGE_CONST(0xbe0bc11b, 0x2bc639cb, 0xc28f72a8, 0xd07c21cc, 0xbc06cfa7, 0x4c2ff25e, 0x630c9740, 0x23128eab, 0x6f062fc8, 0x75148197, 0xd10375c3, 0xcc3fadb6, 0x20277e9c, 0x00579c55, 0xeddd7f95, 0xe95604db),
+ SECP256K1_GE_STORAGE_CONST(0xbe0bc11b, 0x2bc639cb, 0xc28f72a8, 0xd07c21cc, 0xbc06cfa7, 0x4c2ff25e, 0x630c9740, 0x23128eab, 0x90f9d037, 0x8aeb7e68, 0x2efc8a3c, 0x33c05249, 0xdfd88163, 0xffa863aa, 0x12228069, 0x16a9f754),
+ SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0xfdda0ad6, 0x118a5350, 0x3303ba9f, 0xd93a1b94, 0x07fdc85c, 0xc6db9aa5, 0xe906f176, 0xf7a12705),
+ SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0x0225f529, 0xee75acaf, 0xccfc4560, 0x26c5e46b, 0xf80237a3, 0x3924655a, 0x16f90e88, 0x085ed52a),
+ SECP256K1_GE_STORAGE_CONST(0xaee172d4, 0xce7c5010, 0xdb20a88f, 0x469598c1, 0xd7f7926f, 0xabb85cb5, 0x339f1403, 0x87e6b494, 0x38065980, 0x4de81b35, 0x098c7190, 0xe3380f9d, 0x95b2ed6c, 0x6c869e85, 0xc772bc5a, 0x7bc3d9d5),
+ SECP256K1_GE_STORAGE_CONST(0xaee172d4, 0xce7c5010, 0xdb20a88f, 0x469598c1, 0xd7f7926f, 0xabb85cb5, 0x339f1403, 0x87e6b494, 0xc7f9a67f, 0xb217e4ca, 0xf6738e6f, 0x1cc7f062, 0x6a4d1293, 0x9379617a, 0x388d43a4, 0x843c225a),
+ SECP256K1_GE_STORAGE_CONST(0xc28f5c28, 0xf5c28f5c, 0x28f5c28f, 0x5c28f5c2, 0x8f5c28f5, 0xc28f5c28, 0xf5c28f5b, 0x6666635a, 0x0c4da840, 0x1b2cf5be, 0x4604e6ec, 0xf92b2780, 0x063a5351, 0xe294bf65, 0xbb2f8b61, 0x00902db7),
+ SECP256K1_GE_STORAGE_CONST(0xc28f5c28, 0xf5c28f5c, 0x28f5c28f, 0x5c28f5c2, 0x8f5c28f5, 0xc28f5c28, 0xf5c28f5b, 0x6666635a, 0xf3b257bf, 0xe4d30a41, 0xb9fb1913, 0x06d4d87f, 0xf9c5acae, 0x1d6b409a, 0x44d0749d, 0xff6fce78),
+ SECP256K1_GE_STORAGE_CONST(0xecf56be6, 0x9c8fde26, 0x152832c6, 0xe043b3d5, 0xaf9a723f, 0x789854a0, 0xcb1b810d, 0xe2614ece, 0x66127ae4, 0xe4c17a75, 0x60a727e6, 0xffd2ea7f, 0xaed99088, 0xbec465c6, 0xbde56791, 0x37ed5572),
+ SECP256K1_GE_STORAGE_CONST(0xecf56be6, 0x9c8fde26, 0x152832c6, 0xe043b3d5, 0xaf9a723f, 0x789854a0, 0xcb1b810d, 0xe2614ece, 0x99ed851b, 0x1b3e858a, 0x9f58d819, 0x002d1580, 0x51266f77, 0x413b9a39, 0x421a986d, 0xc812a6bd),
+ SECP256K1_GE_STORAGE_CONST(0xba72860f, 0x10fcd142, 0x23f71e3c, 0x228deb9a, 0xc46c5ff5, 0x90b884e5, 0xcc60d51e, 0x0629d16e, 0x67999f31, 0x5a74ada3, 0x526832cf, 0x76b9fec3, 0xa348cc97, 0x33c3aa67, 0x02bd2516, 0x7814f635),
+ SECP256K1_GE_STORAGE_CONST(0xba72860f, 0x10fcd142, 0x23f71e3c, 0x228deb9a, 0xc46c5ff5, 0x90b884e5, 0xcc60d51e, 0x0629d16e, 0x986660ce, 0xa58b525c, 0xad97cd30, 0x8946013c, 0x5cb73368, 0xcc3c5598, 0xfd42dae8, 0x87eb05fa),
+ SECP256K1_GE_STORAGE_CONST(0x92ef5657, 0xdba51cc7, 0xf3e1b442, 0xa6a0916b, 0x8ce03079, 0x2ef5657d, 0xba51cc7e, 0xab2beb65, 0x782c65d2, 0x3f1e0eb2, 0x9179a994, 0xe5e8ff80, 0x5a0d50d9, 0xdeeaed90, 0xcec96ca5, 0x973e2ad3),
+ SECP256K1_GE_STORAGE_CONST(0x92ef5657, 0xdba51cc7, 0xf3e1b442, 0xa6a0916b, 0x8ce03079, 0x2ef5657d, 0xba51cc7e, 0xab2beb65, 0x87d39a2d, 0xc0e1f14d, 0x6e86566b, 0x1a17007f, 0xa5f2af26, 0x2115126f, 0x31369359, 0x68c1d15c),
+ SECP256K1_GE_STORAGE_CONST(0x9468ad22, 0xf921fc78, 0x8de3f1b0, 0x586c58eb, 0x5e6f0270, 0xe950b602, 0x7ada90d9, 0xd71ae323, 0x922a0c6a, 0x9ccc31d9, 0xc3bf87fd, 0x88381739, 0x35fe393f, 0xa64dfdec, 0x29f2846d, 0x12918d86),
+ SECP256K1_GE_STORAGE_CONST(0x9468ad22, 0xf921fc78, 0x8de3f1b0, 0x586c58eb, 0x5e6f0270, 0xe950b602, 0x7ada90d9, 0xd71ae323, 0x6dd5f395, 0x6333ce26, 0x3c407802, 0x77c7e8c6, 0xca01c6c0, 0x59b20213, 0xd60d7b91, 0xed6e6ea9),
+ SECP256K1_GE_STORAGE_CONST(0x76ddc7f5, 0xe029e59e, 0x22b0e54f, 0xa811db94, 0x5a209c4f, 0x5e912ca2, 0x8b4da6a7, 0x4c1e00a2, 0x1e8f516c, 0x91c20437, 0x50f6e24e, 0x8c2cf202, 0xacf68291, 0xbf8b66eb, 0xf7335b62, 0xec2c88fe),
+ SECP256K1_GE_STORAGE_CONST(0x76ddc7f5, 0xe029e59e, 0x22b0e54f, 0xa811db94, 0x5a209c4f, 0x5e912ca2, 0x8b4da6a7, 0x4c1e00a2, 0xe170ae93, 0x6e3dfbc8, 0xaf091db1, 0x73d30dfd, 0x53097d6e, 0x40749914, 0x08cca49c, 0x13d37331),
+ SECP256K1_GE_STORAGE_CONST(0xf75763bc, 0x2907e79b, 0x125e33c3, 0x9a027f48, 0x0f8c6409, 0x2153432f, 0x967bc2b1, 0x1d1f5cf0, 0xb4a8edc6, 0x36391b39, 0x9bc219c0, 0x3d033128, 0xdbcd463e, 0xd2506394, 0x061b87a5, 0x9e510235),
+ SECP256K1_GE_STORAGE_CONST(0xf75763bc, 0x2907e79b, 0x125e33c3, 0x9a027f48, 0x0f8c6409, 0x2153432f, 0x967bc2b1, 0x1d1f5cf0, 0x4b571239, 0xc9c6e4c6, 0x643de63f, 0xc2fcced7, 0x2432b9c1, 0x2daf9c6b, 0xf9e47859, 0x61aef9fa),
+ };
+
+ secp256k1_ge ge;
+ secp256k1_fe fe;
+ secp256k1_ge_storage ges;
+ int i, s;
+ for (i = 1; i <= 16; i++) {
+ secp256k1_fe_set_int(&fe, i);
+
+ for (s = 0; s < 2; s++) {
+ if (s) {
+ /* Odd s: test the negated input -i as well. */
+ secp256k1_fe_negate(&fe, &fe, 1);
+ secp256k1_fe_normalize(&fe);
+ }
+ shallue_van_de_woestijne(&ge, &fe);
+ secp256k1_ge_to_storage(&ges, &ge);
+
+ /* results[] is ordered (+1, -1, +2, -2, ...): index (i-1)*2 + s. */
+ CHECK(memcmp(&ges, &results[i * 2 + s - 2], sizeof(secp256k1_ge_storage)) == 0);
+ }
+ }
+}
+
+/* Fixed-vector test for generator derivation: keys 1..32 (as 32-byte
+ * big-endian values) must map to the expected group elements, both via the
+ * blinded path with an all-zero blinding factor and via the unblinded path
+ * (the two must agree since a zero blind adds nothing). */
+void test_generator_generate(void) {
+ static const secp256k1_ge_storage results[32] = {
+ SECP256K1_GE_STORAGE_CONST(0x806cd8ed, 0xd6c153e3, 0x4aa9b9a0, 0x8755c4be, 0x4718b1ef, 0xb26cb93f, 0xfdd99e1b, 0x21f2af8e, 0xc7062208, 0xcc649a03, 0x1bdc1a33, 0x9d01f115, 0x4bcd0dca, 0xfe0b875d, 0x62f35f73, 0x28673006),
+ SECP256K1_GE_STORAGE_CONST(0xd91b15ec, 0x47a811f4, 0xaa189561, 0xd13f5c4d, 0x4e81f10d, 0xc7dc551f, 0x4fea9b84, 0x610314c4, 0x9b0ada1e, 0xb38efd67, 0x8bff0b6c, 0x7d7315f7, 0xb49b8cc5, 0xa679fad4, 0xc94f9dc6, 0x9da66382),
+ SECP256K1_GE_STORAGE_CONST(0x11c00de6, 0xf885035e, 0x76051430, 0xa3c38b2a, 0x5f86ab8c, 0xf66dae58, 0x04ea7307, 0x348b19bf, 0xe0858ae7, 0x61dcb1ba, 0xff247e37, 0xd38fcd88, 0xf3bd7911, 0xaa4ed6e0, 0x28d792dd, 0x3ee1ac09),
+ SECP256K1_GE_STORAGE_CONST(0x986b99eb, 0x3130e7f0, 0xe779f674, 0xb85cb514, 0x46a676bf, 0xb1dfb603, 0x4c4bb639, 0x7c406210, 0xdf900609, 0x8b3ef1e0, 0x30e32fb0, 0xd97a4329, 0xff98aed0, 0xcd278c3f, 0xe6078467, 0xfbd12f35),
+ SECP256K1_GE_STORAGE_CONST(0xae528146, 0x03fdf91e, 0xc592977e, 0x12461dc7, 0xb9e038f8, 0x048dcb62, 0xea264756, 0xd459ae42, 0x80ef658d, 0x92becb84, 0xdba8e4f9, 0x560d7a72, 0xbaf4c393, 0xfbcf6007, 0x11039f1c, 0x224faaad),
+ SECP256K1_GE_STORAGE_CONST(0x00df3d91, 0x35975eee, 0x91fab903, 0xe3128e4a, 0xca071dde, 0x270814e5, 0xcbda69ec, 0xcad58f46, 0x11b590aa, 0x92d89969, 0x2dbd932f, 0x08013b8b, 0x45afabc6, 0x43677db2, 0x143e0c0f, 0x5865fb03),
+ SECP256K1_GE_STORAGE_CONST(0x1168155b, 0x987e9bc8, 0x84c5f3f4, 0x92ebf784, 0xcc8c6735, 0x39d8e5e8, 0xa967115a, 0x2949da9b, 0x0858a470, 0xf403ca97, 0xb1827f6f, 0x544c2c67, 0x08f6cb83, 0xc510c317, 0x96c981ed, 0xb9f61780),
+ SECP256K1_GE_STORAGE_CONST(0xe8d7c0cf, 0x2bb4194c, 0x97bf2a36, 0xbd115ba0, 0x81a9afe8, 0x7663fa3c, 0x9c3cd253, 0x79fe2571, 0x2028ad04, 0xefa00119, 0x5a25d598, 0x67e79502, 0x49de7c61, 0x4751cd9d, 0x4fb317f6, 0xf76f1110),
+ SECP256K1_GE_STORAGE_CONST(0x9532c491, 0xa64851dd, 0xcd0d3e5a, 0x93e17267, 0xa10aca95, 0xa23781aa, 0x5087f340, 0xc45fecc3, 0xb691ddc2, 0x3143a7b6, 0x09969302, 0x258affb8, 0x5bbf8666, 0xe1192319, 0xeb174d88, 0x308bd57a),
+ SECP256K1_GE_STORAGE_CONST(0x6b20b6e2, 0x1ba6cc44, 0x3f2c3a0c, 0x5283ba44, 0xbee43a0a, 0x2799a6cf, 0xbecc0f8a, 0xf8c583ac, 0xf7021e76, 0xd51291a6, 0xf9396215, 0x686f25aa, 0xbec36282, 0x5e11eeea, 0x6e51a6e6, 0xd7d7c006),
+ SECP256K1_GE_STORAGE_CONST(0xde27e6ff, 0x219b3ab1, 0x2b0a9e4e, 0x51fc6092, 0x96e55af6, 0xc6f717d6, 0x12cd6cce, 0x65d6c8f2, 0x48166884, 0x4dc13fd2, 0xed7a7d81, 0x66a0839a, 0x8a960863, 0xfe0001c1, 0x35d206fd, 0x63b87c09),
+ SECP256K1_GE_STORAGE_CONST(0x79a96fb8, 0xd88a08d3, 0x055d38d1, 0x3346b0d4, 0x47d838ca, 0xfcc8fa40, 0x6d3a7157, 0xef84e7e3, 0x6bab9c45, 0x2871b51d, 0xb0df2369, 0xe7860e01, 0x2e37ffea, 0x6689fd1a, 0x9c6fe9cf, 0xb940acea),
+ SECP256K1_GE_STORAGE_CONST(0x06c4d4cb, 0xd32c0ddb, 0x67e988c6, 0x2bdbe6ad, 0xa39b80cc, 0x61afb347, 0x234abe27, 0xa689618c, 0x5b355949, 0xf904fe08, 0x569b2313, 0xe8f19f8d, 0xc5b79e27, 0x70da0832, 0x5fb7a229, 0x238ca6b6),
+ SECP256K1_GE_STORAGE_CONST(0x7027e566, 0x3e727c28, 0x42aa14e5, 0x52c2d2ec, 0x1d8beaa9, 0x8a22ceab, 0x15ccafc3, 0xb4f06249, 0x9b3dffbc, 0xdbd5e045, 0x6931fd03, 0x8b1c6a9b, 0x4c168c6d, 0xa6553897, 0xfe11ce49, 0xac728139),
+ SECP256K1_GE_STORAGE_CONST(0xee3520c3, 0x9f2b954d, 0xf8e15547, 0xdaeb6cc8, 0x04c8f3b0, 0x9301f53e, 0xe0c11ea1, 0xeace539d, 0x244ff873, 0x7e060c98, 0xe843c353, 0xcd35d2e4, 0x3cd8b082, 0xcffbc9ae, 0x81eafa70, 0x332f9748),
+ SECP256K1_GE_STORAGE_CONST(0xdaecd756, 0xf5b706a4, 0xc14e1095, 0x3e2f70df, 0xa81276e7, 0x71806b89, 0x4d8a5502, 0xa0ef4998, 0xbac906c0, 0x948b1d48, 0xe023f439, 0xfd3770b8, 0x837f60cc, 0x40552a51, 0x433d0b79, 0x6610da27),
+ SECP256K1_GE_STORAGE_CONST(0x55e1ca28, 0x750fe2d0, 0x57f7449b, 0x3f49d999, 0x3b9616dd, 0x5387bc2e, 0x6e6698f8, 0xc4ea49f4, 0xe339e0e9, 0xa4c7fa99, 0xd063e062, 0x6582bce2, 0x33c6b1ee, 0x17a5b47f, 0x6d43ecf8, 0x98b40120),
+ SECP256K1_GE_STORAGE_CONST(0xdd82cac2, 0x9e0e0135, 0x4964d3bc, 0x27469233, 0xf13bbd5e, 0xd7aff24b, 0x4902fca8, 0x17294b12, 0x561ab1d6, 0xcd9bcb6e, 0x805585cf, 0x3df8714c, 0x1bfa6304, 0x5efbf122, 0x1a3d8fd9, 0x3827764a),
+ SECP256K1_GE_STORAGE_CONST(0xda5cbfb7, 0x3522e9c7, 0xcb594436, 0x83677038, 0x0eaa64a9, 0x2eca3888, 0x0fe4c9d6, 0xdeb22dbf, 0x4f46de68, 0x0447c780, 0xc54a314b, 0x5389a926, 0xbba8910b, 0x869fc6cd, 0x42ee82e8, 0x5895e42a),
+ SECP256K1_GE_STORAGE_CONST(0x4e09830e, 0xc8894c58, 0x4e6278de, 0x167a96b0, 0x20d60463, 0xee48f788, 0x4974d66e, 0x871e35e9, 0x21259c4d, 0x332ca932, 0x2e187df9, 0xe7afbc23, 0x9d171ebc, 0x7d9e2560, 0x503f50b1, 0x9fe45834),
+ SECP256K1_GE_STORAGE_CONST(0xabfff6ca, 0x41dcfd17, 0x03cae629, 0x9d127971, 0xf19ee000, 0x2db332e6, 0x5cc209a3, 0xc21b8f54, 0x65991d60, 0xee54f5cc, 0xddf7a732, 0xa76b0303, 0xb9f519a6, 0x22ea0390, 0x8af23ffa, 0x35ae6632),
+ SECP256K1_GE_STORAGE_CONST(0xc6c9b92c, 0x91e045a5, 0xa1913277, 0x44d6fce2, 0x11b12c7c, 0x9b3112d6, 0xc61e14a6, 0xd6b1ae12, 0x04ab0396, 0xebdc4c6a, 0xc213cc3e, 0x077a2e80, 0xb4ba7b2b, 0x33907d56, 0x2c98ccf7, 0xb82a2e9f),
+ SECP256K1_GE_STORAGE_CONST(0x66f6e6d9, 0xc4bb9a5f, 0x99085781, 0x83cb9362, 0x2ea437d8, 0xccd31969, 0xffadca3a, 0xff1d3935, 0x50a5b06e, 0x39e039d7, 0x1dfb2723, 0x18db74e5, 0x5af64da1, 0xdfc34586, 0x6aac3bd0, 0x5792a890),
+ SECP256K1_GE_STORAGE_CONST(0x58ded03c, 0x98e1a890, 0x63fc7793, 0xe3ecd896, 0x235e75c9, 0x82e7008f, 0xddbf3ca8, 0x5b7e9ecb, 0x34594776, 0x58ab6821, 0xaf43a453, 0xa946fda9, 0x13d24999, 0xccf22df8, 0xd291ef59, 0xb08975c0),
+ SECP256K1_GE_STORAGE_CONST(0x74557864, 0x4f2b0486, 0xd5beea7c, 0x2d258ccb, 0x78a870e1, 0x848982d8, 0xed3f91a4, 0x9db83a36, 0xd84e940e, 0x1d33c28a, 0x62398ec8, 0xc493aee7, 0x7c2ba722, 0x42dee7ae, 0x3c35c256, 0xad00cf42),
+ SECP256K1_GE_STORAGE_CONST(0x7fc7963a, 0x16abc8fb, 0x5d61eb61, 0x0fc50a68, 0x754470d2, 0xf43df3be, 0x52228f66, 0x522fe61b, 0x499f9e7f, 0x462c6545, 0x29687af4, 0x9f7c732d, 0x48801ce5, 0x21acd546, 0xc6fb903c, 0x7c265032),
+ SECP256K1_GE_STORAGE_CONST(0xb2f6257c, 0xc58df82f, 0xb9ba4f36, 0x7ededf03, 0xf8ea10f3, 0x104d7ae6, 0x233b7ac4, 0x725e11de, 0x9c7a32df, 0x4842f33d, 0xaad84f0b, 0x62e88b40, 0x46ddcbde, 0xbbeec6f8, 0x93bfde27, 0x0561dc73),
+ SECP256K1_GE_STORAGE_CONST(0xe2cdfd27, 0x8a8e22be, 0xabf08b79, 0x1bc6ae38, 0x41d22a9a, 0x9472e266, 0x1a7c6e83, 0xa2f74725, 0x0e26c103, 0xe0dd93b2, 0x3724f3b7, 0x8bb7366e, 0x2c245768, 0xd64f3283, 0xd8316e8a, 0x1383b977),
+ SECP256K1_GE_STORAGE_CONST(0x757c13e7, 0xe866017e, 0xe6af61d7, 0x161d208a, 0xc438f712, 0x242fcd23, 0x63a10e59, 0xd67e41fb, 0xb550c6a9, 0x4ddb15f3, 0xfeea4bfe, 0xd2faa19f, 0x2aa2fbd3, 0x0c6ae785, 0xe357f365, 0xb30d12e0),
+ SECP256K1_GE_STORAGE_CONST(0x528d525e, 0xac30095b, 0x5e5f83ca, 0x4d3dea63, 0xeb608f2d, 0x18dd25a7, 0x2529c8e5, 0x1ae5f9f1, 0xfde2860b, 0x492a4106, 0x9f356c05, 0x3ebc045e, 0x4ad08b79, 0x3e264935, 0xf25785a9, 0x8690b5ee),
+ SECP256K1_GE_STORAGE_CONST(0x150df593, 0x5b6956a0, 0x0cfed843, 0xb9d6ffce, 0x4f790022, 0xea18730f, 0xc495111d, 0x91568e55, 0x6700a2ca, 0x9ff4ed32, 0xc1697312, 0x4eb51ce3, 0x5656344b, 0x65a1e3d5, 0xd6c1f7ce, 0x29233f82),
+ SECP256K1_GE_STORAGE_CONST(0x38e02eaf, 0x2c8774fd, 0x58b8b373, 0x732457f1, 0x16dbe53b, 0xea5683d9, 0xada20dd7, 0x14ce20a6, 0x6ac5362e, 0xbb425416, 0x8250f43f, 0xa4ee2b63, 0x0406324f, 0x1c876d60, 0xebe5be2c, 0x6eb1515b),
+ };
+ secp256k1_generator gen;
+ secp256k1_ge ge;
+ secp256k1_ge_storage ges;
+ int i;
+ unsigned char v[32];
+ /* All-zero blinding factor: blinded and unblinded derivation must agree. */
+ static const unsigned char s[32] = {0};
+ secp256k1_scalar sc;
+ /* NOTE(review): sc is initialized but not used below. */
+ secp256k1_scalar_set_b32(&sc, s, NULL);
+ for (i = 1; i <= 32; i++) {
+ /* Key is the 32-byte big-endian encoding of i. */
+ memset(v, 0, 31);
+ v[31] = i;
+ CHECK(secp256k1_generator_generate_blinded(ctx, &gen, v, s));
+ secp256k1_generator_load(&ge, &gen);
+ secp256k1_ge_to_storage(&ges, &ge);
+ CHECK(memcmp(&ges, &results[i - 1], sizeof(secp256k1_ge_storage)) == 0);
+ CHECK(secp256k1_generator_generate(ctx, &gen, v));
+ secp256k1_generator_load(&ge, &gen);
+ secp256k1_ge_to_storage(&ges, &ge);
+ CHECK(memcmp(&ges, &results[i - 1], sizeof(secp256k1_ge_storage)) == 0);
+ }
+}
+
+/* Entry point for the generator module's test suite. */
+void run_generator_tests(void) {
+ test_shallue_van_de_woestijne();
+ test_generator_api();
+ test_generator_generate();
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean.h.swp
new file mode 100644
index 0000000..dbc37bf
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean_impl.h.swp
new file mode 100644
index 0000000..d2d1456
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.borromean_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.main_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.main_impl.h.swp
new file mode 100644
index 0000000..af1bf64
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.main_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.tests_impl.h.swp b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.tests_impl.h.swp
new file mode 100644
index 0000000..ff6e0d8
Binary files /dev/null and b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/.tests_impl.h.swp differ
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/Makefile.am.include
new file mode 100644
index 0000000..5272f22
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/Makefile.am.include
@@ -0,0 +1,13 @@
+include_HEADERS += include/secp256k1_rangeproof.h
+noinst_HEADERS += src/modules/rangeproof/main_impl.h
+noinst_HEADERS += src/modules/rangeproof/borromean.h
+noinst_HEADERS += src/modules/rangeproof/borromean_impl.h
+noinst_HEADERS += src/modules/rangeproof/rangeproof.h
+noinst_HEADERS += src/modules/rangeproof/rangeproof_impl.h
+noinst_HEADERS += src/modules/rangeproof/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_rangeproof
+bench_rangeproof_SOURCES = src/bench_rangeproof.c
+bench_rangeproof_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_rangeproof_LDFLAGS = -static
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean.h
new file mode 100644
index 0000000..8f8cfed
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean.h
@@ -0,0 +1,24 @@
+/**********************************************************************
+ * Copyright (c) 2014, 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+
+#ifndef _SECP256K1_BORROMEAN_H_
+#define _SECP256K1_BORROMEAN_H_
+
+#include "scalar.h"
+#include "field.h"
+#include "group.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+
+int secp256k1_borromean_verify(const secp256k1_ecmult_context* ecmult_ctx, secp256k1_scalar *evalues, const unsigned char *e0, const secp256k1_scalar *s,
+ const secp256k1_gej *pubs, const size_t *rsizes, size_t nrings, const unsigned char *m, size_t mlen);
+
+int secp256k1_borromean_sign(const secp256k1_ecmult_context* ecmult_ctx, const secp256k1_ecmult_gen_context *ecmult_gen_ctx,
+ unsigned char *e0, secp256k1_scalar *s, const secp256k1_gej *pubs, const secp256k1_scalar *k, const secp256k1_scalar *sec,
+ const size_t *rsizes, const size_t *secidx, size_t nrings, const unsigned char *m, size_t mlen);
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean_impl.h
new file mode 100644
index 0000000..3a82f09
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/borromean_impl.h
@@ -0,0 +1,204 @@
+/**********************************************************************
+ * Copyright (c) 2014, 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+
+#ifndef _SECP256K1_BORROMEAN_IMPL_H_
+#define _SECP256K1_BORROMEAN_IMPL_H_
+
+#include "scalar.h"
+#include "field.h"
+#include "group.h"
+#include "hash.h"
+#include "eckey.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+#include "borromean.h"
+
+#include <limits.h>
+#include <string.h>
+
+#ifdef WORDS_BIGENDIAN
+#define BE32(x) (x)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#endif
+
+/* Compute the domain-separated ring challenge:
+ *   hash = SHA256(e || m || ridx || eidx)
+ * where ridx (ring index) and eidx (position within the ring) are serialized
+ * as 32-bit big-endian integers, making the digest independent of host
+ * endianness. */
+SECP256K1_INLINE static void secp256k1_borromean_hash(unsigned char *hash, const unsigned char *m, size_t mlen, const unsigned char *e, size_t elen,
+ size_t ridx, size_t eidx) {
+ uint32_t ring;
+ uint32_t epos;
+ secp256k1_sha256 sha256_en;
+ secp256k1_sha256_initialize(&sha256_en);
+ ring = BE32((uint32_t)ridx);
+ epos = BE32((uint32_t)eidx);
+ secp256k1_sha256_write(&sha256_en, e, elen);
+ secp256k1_sha256_write(&sha256_en, m, mlen);
+ secp256k1_sha256_write(&sha256_en, (unsigned char*)&ring, 4);
+ secp256k1_sha256_write(&sha256_en, (unsigned char*)&epos, 4);
+ secp256k1_sha256_finalize(&sha256_en, hash);
+}
+
+/** "Borromean" ring signature.
+ * Verifies nrings concurrent ring signatures all sharing a challenge value.
+ * Signature is one s value per pubkey and a hash.
+ * Verification equation:
+ * | m = H(P_{0..}||message) (Message must contain pubkeys or a pubkey commitment)
+ * | For each ring i:
+ * | | en = to_scalar(H(e0||m||i||0))
+ * | | For each pubkey j:
+ * | | | r = s_i_j G + en * P_i_j
+ * | | | e = H(r||m||i||j)
+ * | | | en = to_scalar(e)
+ * | | r_i = r
+ * | return e_0 == H(r_{0..i}||m)
+ */
+/* Verify a Borromean ring signature (see the equation in the comment above).
+ * Returns 1 iff the shared challenge e0 is reproduced from the s values and
+ * pubkeys; optionally records the per-member challenges in evalues for
+ * proof rewinding. */
+int secp256k1_borromean_verify(const secp256k1_ecmult_context* ecmult_ctx, secp256k1_scalar *evalues, const unsigned char *e0,
+ const secp256k1_scalar *s, const secp256k1_gej *pubs, const size_t *rsizes, size_t nrings, const unsigned char *m, size_t mlen) {
+ secp256k1_gej rgej;
+ secp256k1_ge rge;
+ secp256k1_scalar ens;
+ secp256k1_sha256 sha256_e0;
+ unsigned char tmp[33];
+ size_t i;
+ size_t j; /* position within the current ring */
+ size_t count; /* flat index into s[] and pubs[] across all rings */
+ size_t size;
+ int overflow;
+ VERIFY_CHECK(ecmult_ctx != NULL);
+ VERIFY_CHECK(e0 != NULL);
+ VERIFY_CHECK(s != NULL);
+ VERIFY_CHECK(pubs != NULL);
+ VERIFY_CHECK(rsizes != NULL);
+ VERIFY_CHECK(nrings > 0);
+ VERIFY_CHECK(m != NULL);
+ count = 0;
+ secp256k1_sha256_initialize(&sha256_e0);
+ for (i = 0; i < nrings; i++) {
+ VERIFY_CHECK(INT_MAX - count > rsizes[i]);
+ /* Initial challenge for ring i: en = to_scalar(H(e0||m||i||0)).
+  * The overflow flag is examined at the top of the inner loop. */
+ secp256k1_borromean_hash(tmp, m, mlen, e0, 32, i, 0);
+ secp256k1_scalar_set_b32(&ens, tmp, &overflow);
+ for (j = 0; j < rsizes[i]; j++) {
+ /* Reject overflowed/zero challenges, zero s values and infinite keys. */
+ if (overflow || secp256k1_scalar_is_zero(&s[count]) || secp256k1_scalar_is_zero(&ens) || secp256k1_gej_is_infinity(&pubs[count])) {
+ return 0;
+ }
+ if (evalues) {
+ /*If requested, save the challenges for proof rewind.*/
+ evalues[count] = ens;
+ }
+ /* r = en*P + s*G for the current ring member. */
+ secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count], &ens, &s[count]);
+ if (secp256k1_gej_is_infinity(&rgej)) {
+ return 0;
+ }
+ /* OPT: loop can be hoisted and split to use batch inversion across all the rings; this would make it much faster. */
+ secp256k1_ge_set_gej_var(&rge, &rgej);
+ secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1);
+ if (j != rsizes[i] - 1) {
+ /* Middle of the ring: r feeds the next member's challenge. */
+ secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j + 1);
+ secp256k1_scalar_set_b32(&ens, tmp, &overflow);
+ } else {
+ /* Last member: r contributes to the recomputed shared challenge. */
+ secp256k1_sha256_write(&sha256_e0, tmp, size);
+ }
+ count++;
+ }
+ }
+ /* Accept iff the recomputed hash over all final r values and m equals e0. */
+ secp256k1_sha256_write(&sha256_e0, m, mlen);
+ secp256k1_sha256_finalize(&sha256_e0, tmp);
+ return memcmp(e0, tmp, 32) == 0;
+}
+
+/* Produce a Borromean ring signature. For each ring i, k[i] is the signer's
+ * nonce, sec[i] the secret key, secidx[i] the signer's position, and the s
+ * values for the forged (non-signer) members are expected to be pre-filled by
+ * the caller. Writes the shared challenge to e0 and the signer's s values in
+ * place; returns 1 on success, 0 if any intermediate value degenerates. */
+int secp256k1_borromean_sign(const secp256k1_ecmult_context* ecmult_ctx, const secp256k1_ecmult_gen_context *ecmult_gen_ctx,
+ unsigned char *e0, secp256k1_scalar *s, const secp256k1_gej *pubs, const secp256k1_scalar *k, const secp256k1_scalar *sec,
+ const size_t *rsizes, const size_t *secidx, size_t nrings, const unsigned char *m, size_t mlen) {
+ secp256k1_gej rgej;
+ secp256k1_ge rge;
+ secp256k1_scalar ens;
+ secp256k1_sha256 sha256_e0;
+ unsigned char tmp[33];
+ size_t i;
+ size_t j; /* position within the current ring */
+ size_t count; /* flat index into s[] and pubs[] across all rings */
+ size_t size;
+ int overflow;
+ VERIFY_CHECK(ecmult_ctx != NULL);
+ VERIFY_CHECK(ecmult_gen_ctx != NULL);
+ VERIFY_CHECK(e0 != NULL);
+ VERIFY_CHECK(s != NULL);
+ VERIFY_CHECK(pubs != NULL);
+ VERIFY_CHECK(k != NULL);
+ VERIFY_CHECK(sec != NULL);
+ VERIFY_CHECK(rsizes != NULL);
+ VERIFY_CHECK(secidx != NULL);
+ VERIFY_CHECK(nrings > 0);
+ VERIFY_CHECK(m != NULL);
+ secp256k1_sha256_initialize(&sha256_e0);
+ count = 0;
+ /* Phase 1: per ring, start from r = k[i]*G at the signer's slot and walk the
+  * forged members after it; the final r of each ring is hashed into e0. */
+ for (i = 0; i < nrings; i++) {
+ VERIFY_CHECK(INT_MAX - count > rsizes[i]);
+ secp256k1_ecmult_gen(ecmult_gen_ctx, &rgej, &k[i]);
+ secp256k1_ge_set_gej(&rge, &rgej);
+ if (secp256k1_gej_is_infinity(&rgej)) {
+ return 0;
+ }
+ secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1);
+ for (j = secidx[i] + 1; j < rsizes[i]; j++) {
+ secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j);
+ secp256k1_scalar_set_b32(&ens, tmp, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ens)) {
+ return 0;
+ }
+ /** The signing algorithm as a whole is not memory uniform so there is likely a cache sidechannel that
+ * leaks which members are non-forgeries. That the forgeries themselves are variable time may leave
+ * an additional privacy impacting timing side-channel, but not a key loss one.
+ */
+ secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count + j], &ens, &s[count + j]);
+ if (secp256k1_gej_is_infinity(&rgej)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej_var(&rge, &rgej);
+ secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1);
+ }
+ secp256k1_sha256_write(&sha256_e0, tmp, size);
+ count += rsizes[i];
+ }
+ secp256k1_sha256_write(&sha256_e0, m, mlen);
+ secp256k1_sha256_finalize(&sha256_e0, e0);
+ count = 0;
+ /* Phase 2: per ring, recompute challenges from e0 up to the signer's slot,
+  * then close the ring by solving s = k - en*sec for the signer. */
+ for (i = 0; i < nrings; i++) {
+ VERIFY_CHECK(INT_MAX - count > rsizes[i]);
+ secp256k1_borromean_hash(tmp, m, mlen, e0, 32, i, 0);
+ secp256k1_scalar_set_b32(&ens, tmp, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ens)) {
+ return 0;
+ }
+ for (j = 0; j < secidx[i]; j++) {
+ secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count + j], &ens, &s[count + j]);
+ if (secp256k1_gej_is_infinity(&rgej)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej_var(&rge, &rgej);
+ secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1);
+ secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j + 1);
+ secp256k1_scalar_set_b32(&ens, tmp, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&ens)) {
+ return 0;
+ }
+ }
+ /* After the loop j == secidx[i]: the signer's slot.
+  * s = k[i] - ens * sec[i]. */
+ secp256k1_scalar_mul(&s[count + j], &ens, &sec[i]);
+ secp256k1_scalar_negate(&s[count + j], &s[count + j]);
+ secp256k1_scalar_add(&s[count + j], &s[count + j], &k[i]);
+ if (secp256k1_scalar_is_zero(&s[count + j])) {
+ return 0;
+ }
+ count += rsizes[i];
+ }
+ /* Clear secret-dependent intermediates.
+  * NOTE(review): a plain memset of stack data may be elided by the compiler;
+  * an explicit_bzero/memset_s-style clear would be more robust -- confirm
+  * against the project's policy for secret wiping. */
+ secp256k1_scalar_clear(&ens);
+ secp256k1_ge_clear(&rge);
+ secp256k1_gej_clear(&rgej);
+ memset(tmp, 0, 33);
+ return 1;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/main_impl.h
new file mode 100644
index 0000000..9eebe38
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/main_impl.h
@@ -0,0 +1,95 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RANGEPROOF_MAIN
+#define SECP256K1_MODULE_RANGEPROOF_MAIN
+
+#include "group.h"
+
+#include "modules/commitment/main_impl.h"
+
+#include "modules/rangeproof/borromean_impl.h"
+#include "modules/rangeproof/rangeproof_impl.h"
+
+/** Parses only the rangeproof header: extracts the exponent, the mantissa bit
+ * count, and the proven [min_value, max_value] interval from the leading
+ * bytes of `proof`. No cryptographic verification is performed and the
+ * context is unused. Returns 1 on a well-formed header, 0 otherwise. */
+int secp256k1_rangeproof_info(const secp256k1_context* ctx, int *exp, int *mantissa,
+ uint64_t *min_value, uint64_t *max_value, const unsigned char *proof, size_t plen) {
+ size_t offset;
+ uint64_t scale; /* Decoded 10^exp; discarded — callers of this entry point do not need it. */
+ ARG_CHECK(exp != NULL);
+ ARG_CHECK(mantissa != NULL);
+ ARG_CHECK(min_value != NULL);
+ ARG_CHECK(max_value != NULL);
+ ARG_CHECK(proof != NULL);
+ offset = 0;
+ scale = 1;
+ (void)ctx;
+ return secp256k1_rangeproof_getheader_impl(&offset, exp, mantissa, &scale, min_value, max_value, proof, plen);
+}
+
+/** Verifies a rangeproof and, using the prover's `nonce`, rewinds it to
+ * recover the blinding factor, the committed value, and any embedded
+ * message. Thin argument-checking wrapper over
+ * secp256k1_rangeproof_verify_impl with rewind outputs enabled; requires
+ * both the ecmult and ecmult_gen contexts. Returns 1 on success. */
+int secp256k1_rangeproof_rewind(const secp256k1_context* ctx,
+ unsigned char *blind_out, uint64_t *value_out, unsigned char *message_out, size_t *outlen, const unsigned char *nonce,
+ uint64_t *min_value, uint64_t *max_value,
+ const secp256k1_pedersen_commitment *commit, const unsigned char *proof, size_t plen, const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_generator* gen) {
+ secp256k1_ge commitp;
+ secp256k1_ge genp;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(min_value != NULL);
+ ARG_CHECK(max_value != NULL);
+ ARG_CHECK(message_out != NULL || outlen == NULL);
+ ARG_CHECK(nonce != NULL);
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ secp256k1_pedersen_commitment_load(&commitp, commit);
+ secp256k1_generator_load(&genp, gen);
+ return secp256k1_rangeproof_verify_impl(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx,
+ blind_out, value_out, message_out, outlen, nonce, min_value, max_value, &commitp, proof, plen, extra_commit, extra_commit_len, &genp);
+}
+
+/** Verifies a rangeproof against a Pedersen commitment, reporting the proven
+ * [min_value, max_value] interval. Pure verification: all rewind-related
+ * arguments of the impl are passed as NULL, so only the ecmult context is
+ * required. Returns 1 if the proof is valid. */
+int secp256k1_rangeproof_verify(const secp256k1_context* ctx, uint64_t *min_value, uint64_t *max_value,
+ const secp256k1_pedersen_commitment *commit, const unsigned char *proof, size_t plen, const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_generator* gen) {
+ secp256k1_ge commitp;
+ secp256k1_ge genp;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(min_value != NULL);
+ ARG_CHECK(max_value != NULL);
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ secp256k1_pedersen_commitment_load(&commitp, commit);
+ secp256k1_generator_load(&genp, gen);
+ return secp256k1_rangeproof_verify_impl(&ctx->ecmult_ctx, NULL,
+ NULL, NULL, NULL, NULL, NULL, min_value, max_value, &commitp, proof, plen, extra_commit, extra_commit_len, &genp);
+}
+
+/** Produces a rangeproof for `value` under the commitment `commit` with
+ * blinding factor `blind`. Argument-checking wrapper over
+ * secp256k1_rangeproof_sign_impl; `nonce` seeds the deterministic
+ * randomness that later allows rewinding, and an optional `message` can be
+ * embedded in the proof. On input *plen is the proof buffer capacity; on
+ * success it is set to the number of bytes written. Returns 1 on success. */
+int secp256k1_rangeproof_sign(const secp256k1_context* ctx, unsigned char *proof, size_t *plen, uint64_t min_value,
+ const secp256k1_pedersen_commitment *commit, const unsigned char *blind, const unsigned char *nonce, int exp, int min_bits, uint64_t value,
+ const unsigned char *message, size_t msg_len, const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_generator* gen){
+ secp256k1_ge commitp;
+ secp256k1_ge genp;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(plen != NULL);
+ ARG_CHECK(commit != NULL);
+ ARG_CHECK(blind != NULL);
+ ARG_CHECK(nonce != NULL);
+ ARG_CHECK(message != NULL || msg_len == 0);
+ ARG_CHECK(extra_commit != NULL || extra_commit_len == 0);
+ ARG_CHECK(gen != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ secp256k1_pedersen_commitment_load(&commitp, commit);
+ secp256k1_generator_load(&genp, gen);
+ return secp256k1_rangeproof_sign_impl(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx,
+ proof, plen, min_value, &commitp, blind, nonce, exp, min_bits, value, message, msg_len, extra_commit, extra_commit_len, &genp);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof.h
new file mode 100644
index 0000000..840a09a
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof.h
@@ -0,0 +1,21 @@
+/**********************************************************************
+ * Copyright (c) 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_RANGEPROOF_H_
+#define _SECP256K1_RANGEPROOF_H_
+
+#include "scalar.h"
+#include "group.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+
+static int secp256k1_rangeproof_verify_impl(const secp256k1_ecmult_context* ecmult_ctx,
+ const secp256k1_ecmult_gen_context* ecmult_gen_ctx,
+ unsigned char *blindout, uint64_t *value_out, unsigned char *message_out, size_t *outlen, const unsigned char *nonce,
+ uint64_t *min_value, uint64_t *max_value, const secp256k1_ge *commit, const unsigned char *proof, size_t plen,
+ const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_ge* genp);
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof_impl.h
new file mode 100644
index 0000000..8b78c95
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/rangeproof_impl.h
@@ -0,0 +1,685 @@
+/**********************************************************************
+ * Copyright (c) 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_RANGEPROOF_IMPL_H_
+#define _SECP256K1_RANGEPROOF_IMPL_H_
+
+#include "eckey.h"
+#include "scalar.h"
+#include "group.h"
+#include "rangeproof.h"
+#include "hash_impl.h"
+#include "util.h"
+
+#include "modules/commitment/pedersen_impl.h"
+#include "modules/rangeproof/borromean.h"
+
+/** Expands the candidate digit commitments for each ring in place. On entry
+ * pubs[] holds the first member of each ring; each subsequent member j is
+ * set to the previous member plus `base`, where `base` starts as
+ * -genp * 10^exp and is quadrupled between rings (each ring encodes a
+ * radix-4 digit, so the digit weight grows by 4x per ring). A negative exp
+ * is treated as 0. */
+SECP256K1_INLINE static void secp256k1_rangeproof_pub_expand(secp256k1_gej *pubs,
+ int exp, size_t *rsizes, size_t rings, const secp256k1_ge* genp) {
+ secp256k1_gej base;
+ size_t i;
+ size_t j;
+ size_t npub;
+ VERIFY_CHECK(exp < 19);
+ if (exp < 0) {
+ exp = 0;
+ }
+ secp256k1_gej_set_ge(&base, genp);
+ secp256k1_gej_neg(&base, &base);
+ while (exp--) {
+ /* Multiplication by 10: tmp = 2b, base = 8b, base = 8b + 2b. */
+ secp256k1_gej tmp;
+ secp256k1_gej_double_var(&tmp, &base, NULL);
+ secp256k1_gej_double_var(&base, &tmp, NULL);
+ secp256k1_gej_double_var(&base, &base, NULL);
+ secp256k1_gej_add_var(&base, &base, &tmp, NULL);
+ }
+ npub = 0;
+ for (i = 0; i < rings; i++) {
+ for (j = 1; j < rsizes[i]; j++) {
+ secp256k1_gej_add_var(&pubs[npub + j], &pubs[npub + j - 1], &base, NULL);
+ }
+ if (i < rings - 1) {
+ /* Two bits per ring: multiply the base by 4 for the next digit. */
+ secp256k1_gej_double_var(&base, &base, NULL);
+ secp256k1_gej_double_var(&base, &base, NULL);
+ }
+ npub += rsizes[i];
+ }
+}
+
+/** Serializes a group element into 33 bytes: data[0] is 1 iff the y
+ * coordinate is NOT a quadratic residue (a one-bit sign flag), followed by
+ * the normalized x coordinate as 32 big-endian bytes. */
+SECP256K1_INLINE static void secp256k1_rangeproof_serialize_point(unsigned char* data, const secp256k1_ge *point) {
+ secp256k1_fe pointx;
+ pointx = point->x;
+ secp256k1_fe_normalize(&pointx);
+ data[0] = !secp256k1_fe_is_quad_var(&point->y);
+ secp256k1_fe_get_b32(data + 1, &pointx);
+}
+
+/** Deterministically derives the prover's random values from `nonce` via an
+ * RFC6979-style HMAC-SHA256 DRBG seeded with nonce || commit || genp ||
+ * proof-header (up to 10 header bytes). Outputs the per-ring blinding
+ * scalars sec[] — the last is the negated sum of the others, so they sum to
+ * zero — and the npub signature scalars s[]. If `message` is non-NULL, its
+ * bytes are XORed into the generated stream (embedding the message) and the
+ * stream bytes are written back into `message` for the caller. The same
+ * derivation is re-run by the rewinder, so the output sequence here must
+ * stay byte-for-byte stable. Returns 1 unless some generated s[] scalar
+ * would be zero or overflow. */
+SECP256K1_INLINE static int secp256k1_rangeproof_genrand(secp256k1_scalar *sec, secp256k1_scalar *s, unsigned char *message,
+ size_t *rsizes, size_t rings, const unsigned char *nonce, const secp256k1_ge *commit, const unsigned char *proof, size_t len, const secp256k1_ge* genp) {
+ unsigned char tmp[32];
+ unsigned char rngseed[32 + 33 + 33 + 10];
+ secp256k1_rfc6979_hmac_sha256 rng;
+ secp256k1_scalar acc;
+ int overflow;
+ int ret;
+ size_t i;
+ size_t j;
+ int b;
+ size_t npub;
+ VERIFY_CHECK(len <= 10);
+ memcpy(rngseed, nonce, 32);
+ secp256k1_rangeproof_serialize_point(rngseed + 32, commit);
+ secp256k1_rangeproof_serialize_point(rngseed + 32 + 33, genp);
+ memcpy(rngseed + 33 + 33 + 32, proof, len);
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, rngseed, 32 + 33 + 33 + len);
+ secp256k1_scalar_clear(&acc);
+ npub = 0;
+ ret = 1;
+ for (i = 0; i < rings; i++) {
+ if (i < rings - 1) {
+ /* NOTE(review): this first DRBG output is discarded before drawing
+ * sec[i]; the rewinder runs the same sequence so the layout is
+ * self-consistent, but confirm the discard is intentional. */
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32);
+ do {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32);
+ secp256k1_scalar_set_b32(&sec[i], tmp, &overflow);
+ } while (overflow || secp256k1_scalar_is_zero(&sec[i]));
+ secp256k1_scalar_add(&acc, &acc, &sec[i]);
+ } else {
+ /* Final ring's blinding factor is -sum(previous), so all sec[] sum to zero. */
+ secp256k1_scalar_negate(&acc, &acc);
+ sec[i] = acc;
+ }
+ for (j = 0; j < rsizes[i]; j++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32);
+ if (message) {
+ /* Index (i * 4 + j) assumes at most 4 members per ring. */
+ for (b = 0; b < 32; b++) {
+ tmp[b] ^= message[(i * 4 + j) * 32 + b];
+ message[(i * 4 + j) * 32 + b] = tmp[b];
+ }
+ }
+ secp256k1_scalar_set_b32(&s[npub], tmp, &overflow);
+ ret &= !(overflow || secp256k1_scalar_is_zero(&s[npub]));
+ npub++;
+ }
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+ secp256k1_scalar_clear(&acc);
+ memset(tmp, 0, 32);
+ return ret;
+}
+
+/** Computes the proof layout from the caller's requested parameters: the
+ * blinded value *v = (value - min_value) / 10^exp, the number of rings, the
+ * ring sizes, the secret index per ring (the radix-4 digits of *v), the
+ * total pubkey count, the decimal scale 10^exp, and the mantissa bit
+ * length. exp and min_bits are clamped in place to what the value permits;
+ * exp < 0 requests an exact-value proof. Returns 0 if the requested range
+ * would overflow the provable interval. */
+SECP256K1_INLINE static int secp256k1_range_proveparams(uint64_t *v, size_t *rings, size_t *rsizes, size_t *npub, size_t *secidx, uint64_t *min_value,
+ int *mantissa, uint64_t *scale, int *exp, int *min_bits, uint64_t value) {
+ size_t i;
+ *rings = 1;
+ rsizes[0] = 1;
+ secidx[0] = 0;
+ *scale = 1;
+ *mantissa = 0;
+ *npub = 0;
+ if (*min_value == UINT64_MAX) {
+ /* If the minimum value is the maximal representable value, then we cannot code a range. */
+ *exp = -1;
+ }
+ if (*exp >= 0) {
+ int max_bits;
+ uint64_t v2;
+ if ((*min_value && value > INT64_MAX) || (value && *min_value >= INT64_MAX)) {
+ /* If either value or min_value is >= 2^63-1 then the other must by zero to avoid overflowing the proven range. */
+ return 0;
+ }
+ max_bits = *min_value ? secp256k1_clz64_var(*min_value) : 64;
+ if (*min_bits > max_bits) {
+ *min_bits = max_bits;
+ }
+ if (*min_bits > 61 || value > INT64_MAX) {
+ /** Ten is not a power of two, so dividing by ten and then representing in base-2 times ten
+ * expands the representable range. The verifier requires the proven range is within 0..2**64.
+ * For very large numbers (all over 2**63) we must change our exponent to compensate.
+ * Rather than handling it precisely, this just disables use of the exponent for big values.
+ */
+ *exp = 0;
+ }
+ /* Mask off the least significant digits, as requested. */
+ *v = value - *min_value;
+ /* If the user has asked for more bits of proof then there is room for in the exponent, reduce the exponent. */
+ v2 = *min_bits ? (UINT64_MAX>>(64-*min_bits)) : 0;
+ for (i = 0; (int) i < *exp && (v2 <= UINT64_MAX / 10); i++) {
+ *v /= 10;
+ v2 *= 10;
+ }
+ /* i is the number of base-10 divisions actually applied; it becomes the effective exponent. */
+ *exp = i;
+ v2 = *v;
+ for (i = 0; (int) i < *exp; i++) {
+ v2 *= 10;
+ *scale *= 10;
+ }
+ /* If the masked number isn't precise, compute the public offset. */
+ *min_value = value - v2;
+ /* How many bits do we need to represent our value? */
+ *mantissa = *v ? 64 - secp256k1_clz64_var(*v) : 1;
+ if (*min_bits > *mantissa) {
+ /* If the user asked for more precision, give it to them. */
+ *mantissa = *min_bits;
+ }
+ /* Digits in radix-4, except for the last digit if our mantissa length is odd. */
+ *rings = (*mantissa + 1) >> 1;
+ for (i = 0; i < *rings; i++) {
+ rsizes[i] = ((i < *rings - 1) | (!(*mantissa&1))) ? 4 : 2;
+ *npub += rsizes[i];
+ secidx[i] = (*v >> (i*2)) & 3;
+ }
+ VERIFY_CHECK(*mantissa>0);
+ VERIFY_CHECK((*v & ~(UINT64_MAX>>(64-*mantissa))) == 0); /* Did this get all the bits? */
+ } else {
+ /* A proof for an exact value. */
+ *exp = 0;
+ *min_value = value;
+ *v = 0;
+ *npub = 2;
+ }
+ VERIFY_CHECK(*v * *scale + *min_value == value);
+ VERIFY_CHECK(*rings > 0);
+ VERIFY_CHECK(*rings <= 32);
+ VERIFY_CHECK(*npub <= 128);
+ return 1;
+}
+
+/* strawman interface, writes proof in proof, a buffer of plen, proves with respect to min_value the range for commit which has the provided blinding factor and value. */
+/** Proof layout written here: 1 header byte (exp/flags), optional mantissa
+ * byte, optional 8-byte big-endian min_value, sign-bit bytes for the coded
+ * ring commitments, the 32-byte x of each ring commitment except the last,
+ * the 32-byte e0 challenge, then 32 bytes per signature scalar. */
+SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl(const secp256k1_ecmult_context* ecmult_ctx,
+ const secp256k1_ecmult_gen_context* ecmult_gen_ctx,
+ unsigned char *proof, size_t *plen, uint64_t min_value,
+ const secp256k1_ge *commit, const unsigned char *blind, const unsigned char *nonce, int exp, int min_bits, uint64_t value,
+ const unsigned char *message, size_t msg_len, const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_ge* genp){
+ secp256k1_gej pubs[128]; /* Candidate digits for our proof, most inferred. */
+ secp256k1_scalar s[128]; /* Signatures in our proof, most forged. */
+ secp256k1_scalar sec[32]; /* Blinding factors for the correct digits. */
+ secp256k1_scalar k[32]; /* Nonces for our non-forged signatures. */
+ secp256k1_scalar stmp;
+ secp256k1_sha256 sha256_m;
+ unsigned char prep[4096];
+ unsigned char tmp[33];
+ unsigned char *signs; /* Location of sign flags in the proof. */
+ uint64_t v;
+ uint64_t scale; /* scale = 10^exp. */
+ int mantissa; /* Number of bits proven in the blinded value. */
+ size_t rings; /* How many digits will our proof cover. */
+ size_t rsizes[32]; /* How many possible values there are for each place. */
+ size_t secidx[32]; /* Which digit is the correct one. */
+ size_t len; /* Number of bytes used so far. */
+ size_t i;
+ int overflow;
+ size_t npub;
+ len = 0;
+ if (*plen < 65 || min_value > value || min_bits > 64 || min_bits < 0 || exp < -1 || exp > 18) {
+ return 0;
+ }
+ if (!secp256k1_range_proveparams(&v, &rings, rsizes, &npub, secidx, &min_value, &mantissa, &scale, &exp, &min_bits, value)) {
+ return 0;
+ }
+ /* Header byte: bit 6 = non-degenerate range (exp in low 5 bits), bit 5 = explicit min_value follows. */
+ proof[len] = (rsizes[0] > 1 ? (64 | exp) : 0) | (min_value ? 32 : 0);
+ len++;
+ if (rsizes[0] > 1) {
+ VERIFY_CHECK(mantissa > 0 && mantissa <= 64);
+ proof[len] = mantissa - 1;
+ len++;
+ }
+ if (min_value) {
+ for (i = 0; i < 8; i++) {
+ proof[len + i] = (min_value >> ((7-i) * 8)) & 255;
+ }
+ len += 8;
+ }
+ /* Do we have enough room in the proof for the message? Each ring gives us 128 bytes, but the
+ * final ring is used to encode the blinding factor and the value, so we can't use that. (Well,
+ * technically there are 64 bytes available if we avoided the other data, but this is difficult
+ * because it's not always in the same place. */
+ if (msg_len > 0 && msg_len > 128 * (rings - 1)) {
+ return 0;
+ }
+ /* Do we have enough room for the proof? */
+ if (*plen - len < 32 * (npub + rings - 1) + 32 + ((rings+6) >> 3)) {
+ return 0;
+ }
+ /* The challenge hash commits to the commitment, generator, header, and (later) ring points. */
+ secp256k1_sha256_initialize(&sha256_m);
+ secp256k1_rangeproof_serialize_point(tmp, commit);
+ secp256k1_sha256_write(&sha256_m, tmp, 33);
+ secp256k1_rangeproof_serialize_point(tmp, genp);
+ secp256k1_sha256_write(&sha256_m, tmp, 33);
+ secp256k1_sha256_write(&sha256_m, proof, len);
+
+ memset(prep, 0, 4096);
+ if (message != NULL) {
+ memcpy(prep, message, msg_len);
+ }
+ /* Note, the data corresponding to the blinding factors must be zero. */
+ if (rsizes[rings - 1] > 1) {
+ size_t idx;
+ /* Value encoding sidechannel. */
+ idx = rsizes[rings - 1] - 1;
+ idx -= secidx[rings - 1] == idx;
+ idx = ((rings - 1) * 4 + idx) * 32;
+ /* Encode v three times (bytes 8..31) with a 0x80 marker so the rewinder can detect it. */
+ for (i = 0; i < 8; i++) {
+ prep[8 + i + idx] = prep[16 + i + idx] = prep[24 + i + idx] = (v >> (56 - i * 8)) & 255;
+ prep[i + idx] = 0;
+ }
+ prep[idx] = 128;
+ }
+ if (!secp256k1_rangeproof_genrand(sec, s, prep, rsizes, rings, nonce, commit, proof, len, genp)) {
+ return 0;
+ }
+ memset(prep, 0, 4096);
+ for (i = 0; i < rings; i++) {
+ /* Sign will overwrite the non-forged signature, move that random value into the nonce. */
+ k[i] = s[i * 4 + secidx[i]];
+ secp256k1_scalar_clear(&s[i * 4 + secidx[i]]);
+ }
+ /** Genrand returns the last blinding factor as -sum(rest),
+ * adding in the blinding factor for our commitment, results in the blinding factor for
+ * the commitment to the last digit that the verifier can compute for itself by subtracting
+ * all the digits in the proof from the commitment. This lets the prover skip sending the
+ * blinded value for one digit.
+ */
+ secp256k1_scalar_set_b32(&stmp, blind, &overflow);
+ secp256k1_scalar_add(&sec[rings - 1], &sec[rings - 1], &stmp);
+ if (overflow || secp256k1_scalar_is_zero(&sec[rings - 1])) {
+ return 0;
+ }
+ signs = &proof[len];
+ /* We need one sign bit for each blinded value we send. */
+ for (i = 0; i < (rings + 6) >> 3; i++) {
+ signs[i] = 0;
+ len++;
+ }
+ npub = 0;
+ for (i = 0; i < rings; i++) {
+ /*OPT: Use the precomputed gen2 basis?*/
+ secp256k1_pedersen_ecmult(&pubs[npub], &sec[i], ((uint64_t)secidx[i] * scale) << (i*2), genp, &secp256k1_ge_const_g);
+ if (secp256k1_gej_is_infinity(&pubs[npub])) {
+ return 0;
+ }
+ if (i < rings - 1) {
+ unsigned char tmpc[33];
+ secp256k1_ge c;
+ unsigned char quadness;
+ /*OPT: split loop and batch invert.*/
+ /*OPT: do not compute full pubs[npub] in ge form; we only need x */
+ secp256k1_ge_set_gej_var(&c, &pubs[npub]);
+ secp256k1_rangeproof_serialize_point(tmpc, &c);
+ quadness = tmpc[0];
+ secp256k1_sha256_write(&sha256_m, tmpc, 33);
+ signs[i>>3] |= quadness << (i&7);
+ memcpy(&proof[len], tmpc + 1, 32);
+ len += 32;
+ }
+ npub += rsizes[i];
+ }
+ secp256k1_rangeproof_pub_expand(pubs, exp, rsizes, rings, genp);
+ if (extra_commit != NULL) {
+ secp256k1_sha256_write(&sha256_m, extra_commit, extra_commit_len);
+ }
+ secp256k1_sha256_finalize(&sha256_m, tmp);
+ if (!secp256k1_borromean_sign(ecmult_ctx, ecmult_gen_ctx, &proof[len], s, pubs, k, sec, rsizes, secidx, rings, tmp, 32)) {
+ return 0;
+ }
+ len += 32;
+ for (i = 0; i < npub; i++) {
+ secp256k1_scalar_get_b32(&proof[len],&s[i]);
+ len += 32;
+ }
+ VERIFY_CHECK(len <= *plen);
+ *plen = len;
+ /* NOTE(review): a plain memset of secret material at function exit may be
+ * elided by the optimizer; consider an explicit-clear helper. Also sec[]/k[]
+ * are not cleared here — confirm whether that is intentional. */
+ memset(prep, 0, 4096);
+ return 1;
+}
+
+/* Computes blinding factor x given k, s, and the challenge e. */
+/* x = (k - s) * e^-1, inverting the Borromean signature equation s = k - x*e. */
+SECP256K1_INLINE static void secp256k1_rangeproof_recover_x(secp256k1_scalar *x, const secp256k1_scalar *k, const secp256k1_scalar *e,
+ const secp256k1_scalar *s) {
+ secp256k1_scalar stmp;
+ secp256k1_scalar_negate(x, s);
+ secp256k1_scalar_add(x, x, k);
+ secp256k1_scalar_inverse(&stmp, e);
+ secp256k1_scalar_mul(x, x, &stmp);
+}
+
+/* Computes ring's nonce given the blinding factor x, the challenge e, and the signature s. */
+/* k = s + x*e; cheaper than recover_x because no scalar inversion is needed. */
+SECP256K1_INLINE static void secp256k1_rangeproof_recover_k(secp256k1_scalar *k, const secp256k1_scalar *x, const secp256k1_scalar *e,
+ const secp256k1_scalar *s) {
+ secp256k1_scalar stmp;
+ secp256k1_scalar_mul(&stmp, x, e);
+ secp256k1_scalar_add(k, s, &stmp);
+}
+
+/* XORs the 32-byte buffer y into x in place. */
+SECP256K1_INLINE static void secp256k1_rangeproof_ch32xor(unsigned char *x, const unsigned char *y) {
+ int i;
+ for (i = 0; i < 32; i++) {
+ x[i] ^= y[i];
+ }
+}
+
+/** Rewinds a verified proof using the prover's nonce: re-derives the
+ * deterministic randomness with genrand, locates the value encoding in the
+ * last ring (marked 0x80 byte plus a triple-repeated big-endian value),
+ * recovers the blinding factor from the one non-forged signature, and
+ * extracts any embedded message into m (up to *mlen bytes; *mlen is set to
+ * the bytes written). `ev` holds the per-signature challenges captured
+ * during verification. Returns 1 on successful rewind. */
+SECP256K1_INLINE static int secp256k1_rangeproof_rewind_inner(secp256k1_scalar *blind, uint64_t *v,
+ unsigned char *m, size_t *mlen, secp256k1_scalar *ev, secp256k1_scalar *s,
+ size_t *rsizes, size_t rings, const unsigned char *nonce, const secp256k1_ge *commit, const unsigned char *proof, size_t len, const secp256k1_ge *genp) {
+ secp256k1_scalar s_orig[128];
+ secp256k1_scalar sec[32];
+ secp256k1_scalar stmp;
+ unsigned char prep[4096];
+ unsigned char tmp[32];
+ uint64_t value;
+ size_t offset;
+ size_t i;
+ size_t j;
+ int b;
+ size_t skip1;
+ size_t skip2;
+ size_t npub;
+ npub = ((rings - 1) << 2) + rsizes[rings-1];
+ VERIFY_CHECK(npub <= 128);
+ VERIFY_CHECK(npub >= 1);
+ memset(prep, 0, 4096);
+ /* Reconstruct the provers random values. */
+ secp256k1_rangeproof_genrand(sec, s_orig, prep, rsizes, rings, nonce, commit, proof, len, genp);
+ /* NOTE(review): *v is written unconditionally here, but later code guards
+ * with `if (v)` — either v may never be NULL (making the guards dead) or
+ * this line can crash; confirm the intended contract. */
+ *v = UINT64_MAX;
+ secp256k1_scalar_clear(blind);
+ if (rings == 1 && rsizes[0] == 1) {
+ /* With only a single proof, we can only recover the blinding factor. */
+ secp256k1_rangeproof_recover_x(blind, &s_orig[0], &ev[0], &s[0]);
+ if (v) {
+ *v = 0;
+ }
+ if (mlen) {
+ *mlen = 0;
+ }
+ return 1;
+ }
+ npub = (rings - 1) << 2;
+ for (j = 0; j < 2; j++) {
+ size_t idx;
+ /* Look for a value encoding in the last ring. */
+ idx = npub + rsizes[rings - 1] - 1 - j;
+ secp256k1_scalar_get_b32(tmp, &s[idx]);
+ secp256k1_rangeproof_ch32xor(tmp, &prep[idx * 32]);
+ /* Marker byte 0x80 plus value repeated three times identifies the sidechannel. */
+ if ((tmp[0] & 128) && (memcmp(&tmp[16], &tmp[24], 8) == 0) && (memcmp(&tmp[8], &tmp[16], 8) == 0)) {
+ value = 0;
+ for (i = 0; i < 8; i++) {
+ value = (value << 8) + tmp[24 + i];
+ }
+ if (v) {
+ *v = value;
+ }
+ memcpy(&prep[idx * 32], tmp, 32);
+ break;
+ }
+ }
+ if (j > 1) {
+ /* Couldn't extract a value. */
+ if (mlen) {
+ *mlen = 0;
+ }
+ return 0;
+ }
+ /* skip1 = forged slot carrying the value; skip2 = the last ring's true secret index. */
+ skip1 = rsizes[rings - 1] - 1 - j;
+ skip2 = ((value >> ((rings - 1) << 1)) & 3);
+ if (skip1 == skip2) {
+ /*Value is in wrong position.*/
+ if (mlen) {
+ *mlen = 0;
+ }
+ return 0;
+ }
+ skip1 += (rings - 1) << 2;
+ skip2 += (rings - 1) << 2;
+ /* Like in the rsize[] == 1 case, Having figured out which s is the one which was not forged, we can recover the blinding factor. */
+ secp256k1_rangeproof_recover_x(&stmp, &s_orig[skip2], &ev[skip2], &s[skip2]);
+ secp256k1_scalar_negate(&sec[rings - 1], &sec[rings - 1]);
+ secp256k1_scalar_add(blind, &stmp, &sec[rings - 1]);
+ if (!m || !mlen || *mlen == 0) {
+ if (mlen) {
+ *mlen = 0;
+ }
+ /* FIXME: cleanup in early out/failure cases. */
+ return 1;
+ }
+ offset = 0;
+ npub = 0;
+ for (i = 0; i < rings; i++) {
+ size_t idx;
+ idx = (value >> (i << 1)) & 3;
+ for (j = 0; j < rsizes[i]; j++) {
+ if (npub == skip1 || npub == skip2) {
+ npub++;
+ continue;
+ }
+ if (idx == j) {
+ /** For the non-forged signatures the signature is calculated instead of random, instead we recover the prover's nonces.
+ * this could just as well recover the blinding factors and messages could be put there as is done for recovering the
+ * blinding factor in the last ring, but it takes an inversion to recover x so it's faster to put the message data in k.
+ */
+ secp256k1_rangeproof_recover_k(&stmp, &sec[i], &ev[npub], &s[npub]);
+ } else {
+ stmp = s[npub];
+ }
+ secp256k1_scalar_get_b32(tmp, &stmp);
+ secp256k1_rangeproof_ch32xor(tmp, &prep[npub * 32]);
+ for (b = 0; b < 32 && offset < *mlen; b++) {
+ m[offset] = tmp[b];
+ offset++;
+ }
+ npub++;
+ }
+ }
+ *mlen = offset;
+ /* Best-effort zeroization of secret material; see the memset caveat in sign_impl. */
+ memset(prep, 0, 4096);
+ for (i = 0; i < 128; i++) {
+ secp256k1_scalar_clear(&s_orig[i]);
+ }
+ for (i = 0; i < 32; i++) {
+ secp256k1_scalar_clear(&sec[i]);
+ }
+ secp256k1_scalar_clear(&stmp);
+ return 1;
+}
+
+/** Parses the proof header starting at *offset, advancing *offset past it.
+ * Decodes the flag byte (bit 6: non-zero range with exp in the low 5 bits;
+ * bit 5: explicit 8-byte min_value follows; bit 7 must be clear), the
+ * mantissa byte when present, and computes scale = 10^exp and the proven
+ * [min_value, max_value] interval with overflow checks. Returns 1 on a
+ * well-formed header. */
+SECP256K1_INLINE static int secp256k1_rangeproof_getheader_impl(size_t *offset, int *exp, int *mantissa, uint64_t *scale,
+ uint64_t *min_value, uint64_t *max_value, const unsigned char *proof, size_t plen) {
+ int i;
+ int has_nz_range;
+ int has_min;
+ if (plen < 65 || ((proof[*offset] & 128) != 0)) {
+ return 0;
+ }
+ has_nz_range = proof[*offset] & 64;
+ has_min = proof[*offset] & 32;
+ *exp = -1;
+ *mantissa = 0;
+ if (has_nz_range) {
+ *exp = proof[*offset] & 31;
+ *offset += 1;
+ if (*exp > 18) {
+ return 0;
+ }
+ *mantissa = proof[*offset] + 1;
+ if (*mantissa > 64) {
+ return 0;
+ }
+ *max_value = UINT64_MAX>>(64-*mantissa);
+ } else {
+ *max_value = 0;
+ }
+ *offset += 1;
+ *scale = 1;
+ for (i = 0; i < *exp; i++) {
+ if (*max_value > UINT64_MAX / 10) {
+ return 0;
+ }
+ *max_value *= 10;
+ *scale *= 10;
+ }
+ *min_value = 0;
+ if (has_min) {
+ if(plen - *offset < 8) {
+ return 0;
+ }
+ /*FIXME: Compact minvalue encoding?*/
+ for (i = 0; i < 8; i++) {
+ *min_value = (*min_value << 8) | proof[*offset + i];
+ }
+ *offset += 8;
+ }
+ if (*max_value > UINT64_MAX - *min_value) {
+ return 0;
+ }
+ *max_value += *min_value;
+ return 1;
+}
+
+/* Verifies range proof (len plen) for commit, the min/max values proven are put in the min/max arguments; returns 0 on failure 1 on success.*/
+/** If `nonce` is non-NULL also rewinds the proof (requires ecmult_gen_ctx),
+ * writing the recovered blinding factor, value, and message to the
+ * corresponding out-parameters and checking that the commitment can be
+ * reconstructed from them. */
+SECP256K1_INLINE static int secp256k1_rangeproof_verify_impl(const secp256k1_ecmult_context* ecmult_ctx,
+ const secp256k1_ecmult_gen_context* ecmult_gen_ctx,
+ unsigned char *blindout, uint64_t *value_out, unsigned char *message_out, size_t *outlen, const unsigned char *nonce,
+ uint64_t *min_value, uint64_t *max_value, const secp256k1_ge *commit, const unsigned char *proof, size_t plen, const unsigned char *extra_commit, size_t extra_commit_len, const secp256k1_ge* genp) {
+ secp256k1_gej accj;
+ secp256k1_gej pubs[128];
+ secp256k1_ge c;
+ secp256k1_scalar s[128];
+ secp256k1_scalar evalues[128]; /* Challenges, only used during proof rewind. */
+ secp256k1_sha256 sha256_m;
+ size_t rsizes[32];
+ int ret;
+ size_t i;
+ int exp;
+ int mantissa;
+ size_t offset;
+ size_t rings;
+ int overflow;
+ size_t npub;
+ int offset_post_header;
+ uint64_t scale;
+ unsigned char signs[31];
+ unsigned char m[33];
+ const unsigned char *e0;
+ offset = 0;
+ if (!secp256k1_rangeproof_getheader_impl(&offset, &exp, &mantissa, &scale, min_value, max_value, proof, plen)) {
+ return 0;
+ }
+ offset_post_header = offset;
+ /* Derive the ring layout from the mantissa, mirroring proveparams on the signer side. */
+ rings = 1;
+ rsizes[0] = 1;
+ npub = 1;
+ if (mantissa != 0) {
+ rings = (mantissa >> 1);
+ for (i = 0; i < rings; i++) {
+ rsizes[i] = 4;
+ }
+ npub = (mantissa >> 1) << 2;
+ if (mantissa & 1) {
+ rsizes[rings] = 2;
+ npub += rsizes[rings];
+ rings++;
+ }
+ }
+ VERIFY_CHECK(rings <= 32);
+ if (plen - offset < 32 * (npub + rings - 1) + 32 + ((rings+6) >> 3)) {
+ return 0;
+ }
+ /* Recompute the challenge commitment exactly as the signer did. */
+ secp256k1_sha256_initialize(&sha256_m);
+ secp256k1_rangeproof_serialize_point(m, commit);
+ secp256k1_sha256_write(&sha256_m, m, 33);
+ secp256k1_rangeproof_serialize_point(m, genp);
+ secp256k1_sha256_write(&sha256_m, m, 33);
+ secp256k1_sha256_write(&sha256_m, proof, offset);
+ for(i = 0; i < rings - 1; i++) {
+ signs[i] = (proof[offset + ( i>> 3)] & (1 << (i & 7))) != 0;
+ }
+ offset += (rings + 6) >> 3;
+ if ((rings - 1) & 7) {
+ /* Number of coded blinded points is not a multiple of 8, force extra sign bits to 0 to reject mutation. */
+ if ((proof[offset - 1] >> ((rings - 1) & 7)) != 0) {
+ return 0;
+ }
+ }
+ npub = 0;
+ secp256k1_gej_set_infinity(&accj);
+ if (*min_value) {
+ /* Offset the accumulator by min_value * genp so digits only need to cover the blinded range. */
+ secp256k1_scalar mvs;
+ secp256k1_scalar_set_u64(&mvs, *min_value);
+ secp256k1_ecmult_const(&accj, genp, &mvs, 64);
+ secp256k1_scalar_clear(&mvs);
+ }
+ for(i = 0; i < rings - 1; i++) {
+ secp256k1_fe fe;
+ /* NOTE(review): the return values of secp256k1_fe_set_b32 and
+ * secp256k1_ge_set_xquad are ignored here, so an x that overflows the
+ * field or has no curve point is not rejected at this stage — confirm
+ * whether later checks (infinity test / borromean_verify) make this
+ * safe, or add explicit checks as later upstream versions do. */
+ secp256k1_fe_set_b32(&fe, &proof[offset]);
+ secp256k1_ge_set_xquad(&c, &fe);
+ if (signs[i]) {
+ secp256k1_ge_neg(&c, &c);
+ }
+ /* Not using secp256k1_rangeproof_serialize_point as we almost have it
+ * serialized form already. */
+ secp256k1_sha256_write(&sha256_m, &signs[i], 1);
+ secp256k1_sha256_write(&sha256_m, &proof[offset], 32);
+ secp256k1_gej_set_ge(&pubs[npub], &c);
+ secp256k1_gej_add_ge_var(&accj, &accj, &c, NULL);
+ offset += 32;
+ npub += rsizes[i];
+ }
+ /* The last ring's commitment is implied: commit minus the sum of all coded digits. */
+ secp256k1_gej_neg(&accj, &accj);
+ secp256k1_gej_add_ge_var(&pubs[npub], &accj, commit, NULL);
+ if (secp256k1_gej_is_infinity(&pubs[npub])) {
+ return 0;
+ }
+ secp256k1_rangeproof_pub_expand(pubs, exp, rsizes, rings, genp);
+ npub += rsizes[rings - 1];
+ e0 = &proof[offset];
+ offset += 32;
+ for (i = 0; i < npub; i++) {
+ secp256k1_scalar_set_b32(&s[i], &proof[offset], &overflow);
+ if (overflow) {
+ return 0;
+ }
+ offset += 32;
+ }
+ if (offset != plen) {
+ /*Extra data found, reject.*/
+ return 0;
+ }
+ if (extra_commit != NULL) {
+ secp256k1_sha256_write(&sha256_m, extra_commit, extra_commit_len);
+ }
+ secp256k1_sha256_finalize(&sha256_m, m);
+ ret = secp256k1_borromean_verify(ecmult_ctx, nonce ? evalues : NULL, e0, s, pubs, rsizes, rings, m, 32);
+ if (ret && nonce) {
+ /* Given the nonce, try rewinding the witness to recover its initial state. */
+ secp256k1_scalar blind;
+ uint64_t vv;
+ if (!ecmult_gen_ctx) {
+ return 0;
+ }
+ if (!secp256k1_rangeproof_rewind_inner(&blind, &vv, message_out, outlen, evalues, s, rsizes, rings, nonce, commit, proof, offset_post_header, genp)) {
+ return 0;
+ }
+ /* Unwind apparently successful, see if the commitment can be reconstructed. */
+ /* FIXME: should check vv is in the mantissa's range. */
+ vv = (vv * scale) + *min_value;
+ secp256k1_pedersen_ecmult(&accj, &blind, vv, genp, &secp256k1_ge_const_g);
+ if (secp256k1_gej_is_infinity(&accj)) {
+ return 0;
+ }
+ secp256k1_gej_neg(&accj, &accj);
+ secp256k1_gej_add_ge_var(&accj, &accj, commit, NULL);
+ if (!secp256k1_gej_is_infinity(&accj)) {
+ return 0;
+ }
+ if (blindout) {
+ secp256k1_scalar_get_b32(blindout, &blind);
+ }
+ if (value_out) {
+ *value_out = vv;
+ }
+ }
+ return ret;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/tests_impl.h
new file mode 100644
index 0000000..921498d
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/rangeproof/tests_impl.h
@@ -0,0 +1,439 @@
+/**********************************************************************
+ * Copyright (c) 2015 Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RANGEPROOF_TESTS
+#define SECP256K1_MODULE_RANGEPROOF_TESTS
+
+#include <string.h>
+
+#include "group.h"
+#include "scalar.h"
+#include "testrand.h"
+#include "util.h"
+
+#include "include/secp256k1_commitment.h"
+#include "include/secp256k1_rangeproof.h"
+
+static void test_rangeproof_api(const secp256k1_context *none, const secp256k1_context *sign, const secp256k1_context *vrfy, const secp256k1_context *both, const int32_t *ecount) {
+ unsigned char proof[5134];
+ unsigned char blind[32];
+ secp256k1_pedersen_commitment commit;
+ uint64_t vmin = secp256k1_rand32();
+ uint64_t val = vmin + secp256k1_rand32();
+ size_t len = sizeof(proof);
+ /* we'll switch to dylan thomas for this one */
+ const unsigned char message[68] = "My tears are like the quiet drift / Of petals from some magic rose;";
+ size_t mlen = sizeof(message);
+ const unsigned char ext_commit[72] = "And all my grief flows from the rift / Of unremembered skies and snows.";
+ size_t ext_commit_len = sizeof(ext_commit);
+
+ secp256k1_rand256(blind);
+ CHECK(secp256k1_pedersen_commit(ctx, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+
+ CHECK(secp256k1_rangeproof_sign(none, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 1);
+ CHECK(secp256k1_rangeproof_sign(sign, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 2);
+ CHECK(secp256k1_rangeproof_sign(vrfy, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 3);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 3);
+
+ CHECK(secp256k1_rangeproof_sign(both, NULL, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 4);
+ CHECK(secp256k1_rangeproof_sign(both, proof, NULL, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 5);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, NULL, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 6);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, NULL, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 7);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, NULL, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 8);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, vmin - 1, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 8);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, NULL, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 9);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, NULL, 0, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 9);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, NULL, 0, NULL, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 10);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, NULL, 0, NULL, 0, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 10);
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, NULL, 0, NULL, 0, NULL) == 0);
+ CHECK(*ecount == 11);
+
+ CHECK(secp256k1_rangeproof_sign(both, proof, &len, vmin, &commit, blind, commit.data, 0, 0, val, message, mlen, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ {
+ int exp;
+ int mantissa;
+ uint64_t min_value;
+ uint64_t max_value;
+ CHECK(secp256k1_rangeproof_info(none, &exp, &mantissa, &min_value, &max_value, proof, len) != 0);
+ CHECK(exp == 0);
+ CHECK(((uint64_t) 1 << mantissa) > val - vmin);
+ CHECK(((uint64_t) 1 << (mantissa - 1)) <= val - vmin);
+ CHECK(min_value == vmin);
+ CHECK(max_value >= val);
+
+ CHECK(secp256k1_rangeproof_info(none, NULL, &mantissa, &min_value, &max_value, proof, len) == 0);
+ CHECK(*ecount == 12);
+ CHECK(secp256k1_rangeproof_info(none, &exp, NULL, &min_value, &max_value, proof, len) == 0);
+ CHECK(*ecount == 13);
+ CHECK(secp256k1_rangeproof_info(none, &exp, &mantissa, NULL, &max_value, proof, len) == 0);
+ CHECK(*ecount == 14);
+ CHECK(secp256k1_rangeproof_info(none, &exp, &mantissa, &min_value, NULL, proof, len) == 0);
+ CHECK(*ecount == 15);
+ CHECK(secp256k1_rangeproof_info(none, &exp, &mantissa, &min_value, &max_value, NULL, len) == 0);
+ CHECK(*ecount == 16);
+ CHECK(secp256k1_rangeproof_info(none, &exp, &mantissa, &min_value, &max_value, proof, 0) == 0);
+ CHECK(*ecount == 16);
+ }
+ {
+ uint64_t min_value;
+ uint64_t max_value;
+ CHECK(secp256k1_rangeproof_verify(none, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 17);
+ CHECK(secp256k1_rangeproof_verify(sign, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 18);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 18);
+
+ CHECK(secp256k1_rangeproof_verify(vrfy, NULL, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 19);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, NULL, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 20);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, NULL, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 21);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, NULL, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 22);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, proof, 0, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 22);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, proof, len, NULL, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 23);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 23);
+ CHECK(secp256k1_rangeproof_verify(vrfy, &min_value, &max_value, &commit, proof, len, NULL, 0, NULL) == 0);
+ CHECK(*ecount == 24);
+ }
+ {
+ unsigned char blind_out[32];
+ unsigned char message_out[68];
+ uint64_t value_out;
+ uint64_t min_value;
+ uint64_t max_value;
+ size_t message_len = sizeof(message_out);
+
+ CHECK(secp256k1_rangeproof_rewind(none, blind_out, &value_out, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 25);
+ CHECK(secp256k1_rangeproof_rewind(sign, blind_out, &value_out, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 26);
+ CHECK(secp256k1_rangeproof_rewind(vrfy, blind_out, &value_out, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 27);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 27);
+
+ CHECK(min_value == vmin);
+ CHECK(max_value >= val);
+ CHECK(value_out == val);
+ CHECK(message_len == sizeof(message_out));
+ CHECK(memcmp(message, message_out, sizeof(message_out)) == 0);
+
+ CHECK(secp256k1_rangeproof_rewind(both, NULL, &value_out, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 27); /* blindout may be NULL */
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, NULL, message_out, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 27); /* valueout may be NULL */
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, &message_len, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 28);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) != 0);
+ CHECK(*ecount == 28);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, NULL, &min_value, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 29);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, NULL, &max_value, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 30);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, NULL, &commit, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 31);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, NULL, proof, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 32);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, NULL, len, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 33);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, proof, 0, ext_commit, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 33);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, proof, len, NULL, ext_commit_len, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 34);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h) == 0);
+ CHECK(*ecount == 34);
+ CHECK(secp256k1_rangeproof_rewind(both, blind_out, &value_out, NULL, 0, commit.data, &min_value, &max_value, &commit, proof, len, NULL, 0, NULL) == 0);
+ CHECK(*ecount == 35);
+ }
+}
+
+static void test_api(void) {
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ int32_t ecount;
+ int i;
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ for (i = 0; i < count; i++) {
+ ecount = 0;
+ test_rangeproof_api(none, sign, vrfy, both, &ecount);
+ }
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+static void test_borromean(void) {
+ unsigned char e0[32];
+ secp256k1_scalar s[64];
+ secp256k1_gej pubs[64];
+ secp256k1_scalar k[8];
+ secp256k1_scalar sec[8];
+ secp256k1_ge ge;
+ secp256k1_scalar one;
+ unsigned char m[32];
+ size_t rsizes[8];
+ size_t secidx[8];
+ size_t nrings;
+ size_t i;
+ size_t j;
+ int c;
+ secp256k1_rand256_test(m);
+ nrings = 1 + (secp256k1_rand32()&7);
+ c = 0;
+ secp256k1_scalar_set_int(&one, 1);
+ if (secp256k1_rand32()&1) {
+ secp256k1_scalar_negate(&one, &one);
+ }
+ for (i = 0; i < nrings; i++) {
+ rsizes[i] = 1 + (secp256k1_rand32()&7);
+ secidx[i] = secp256k1_rand32() % rsizes[i];
+ random_scalar_order(&sec[i]);
+ random_scalar_order(&k[i]);
+ if(secp256k1_rand32()&7) {
+ sec[i] = one;
+ }
+ if(secp256k1_rand32()&7) {
+ k[i] = one;
+ }
+ for (j = 0; j < rsizes[i]; j++) {
+ random_scalar_order(&s[c + j]);
+ if(secp256k1_rand32()&7) {
+ s[i] = one;
+ }
+ if (j == secidx[i]) {
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubs[c + j], &sec[i]);
+ } else {
+ random_group_element_test(&ge);
+ random_group_element_jacobian_test(&pubs[c + j],&ge);
+ }
+ }
+ c += rsizes[i];
+ }
+ CHECK(secp256k1_borromean_sign(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, e0, s, pubs, k, sec, rsizes, secidx, nrings, m, 32));
+ CHECK(secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32));
+ i = secp256k1_rand32() % c;
+ secp256k1_scalar_negate(&s[i],&s[i]);
+ CHECK(!secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32));
+ secp256k1_scalar_negate(&s[i],&s[i]);
+ secp256k1_scalar_set_int(&one, 1);
+ for(j = 0; j < 4; j++) {
+ i = secp256k1_rand32() % c;
+ if (secp256k1_rand32() & 1) {
+ secp256k1_gej_double_var(&pubs[i],&pubs[i], NULL);
+ } else {
+ secp256k1_scalar_add(&s[i],&s[i],&one);
+ }
+ CHECK(!secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32));
+ }
+}
+
+static void test_rangeproof(void) {
+ const uint64_t testvs[11] = {0, 1, 5, 11, 65535, 65537, INT32_MAX, UINT32_MAX, INT64_MAX - 1, INT64_MAX, UINT64_MAX};
+ secp256k1_pedersen_commitment commit;
+ secp256k1_pedersen_commitment commit2;
+ unsigned char proof[5134 + 1]; /* One additional byte to test if trailing bytes are rejected */
+ unsigned char blind[32];
+ unsigned char blindout[32];
+ unsigned char message[4096];
+ size_t mlen;
+ uint64_t v;
+ uint64_t vout;
+ uint64_t vmin;
+ uint64_t minv;
+ uint64_t maxv;
+ size_t len;
+ size_t i;
+ size_t j;
+ size_t k;
+ /* Short message is a Simone de Beauvoir quote */
+ const unsigned char message_short[120] = "When I see my own likeness in the depths of someone else's consciousness, I always experience a moment of panic.";
+ /* Long message is 0xA5 with a bunch of this quote in the middle */
+ unsigned char message_long[3968];
+ memset(message_long, 0xa5, sizeof(message_long));
+ for (i = 1200; i < 3600; i += 120) {
+ memcpy(&message_long[i], message_short, sizeof(message_short));
+ }
+
+ secp256k1_rand256(blind);
+ for (i = 0; i < 11; i++) {
+ v = testvs[i];
+ CHECK(secp256k1_pedersen_commit(ctx, &commit, blind, v, &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ for (vmin = 0; vmin < (i<9 && i > 0 ? 2 : 1); vmin++) {
+ const unsigned char *input_message = NULL;
+ size_t input_message_len = 0;
+ /* vmin is always either 0 or 1; if it is 1, then we have no room for a message.
+ * If it's 0, we use "minimum encoding" and only have room for a small message when
+ * `testvs[i]` is >= 4; for a large message when it's >= 2^32. */
+ if (vmin == 0 && i > 2) {
+ input_message = message_short;
+ input_message_len = sizeof(message_short);
+ }
+ if (vmin == 0 && i > 7) {
+ input_message = message_long;
+ input_message_len = sizeof(message_long);
+ }
+ len = 5134;
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, vmin, &commit, blind, commit.data, 0, 0, v, input_message, input_message_len, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(len <= 5134);
+ mlen = 4096;
+ CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, message, &mlen, commit.data, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ if (input_message != NULL) {
+ CHECK(memcmp(message, input_message, input_message_len) == 0);
+ }
+ for (j = input_message_len; j < mlen; j++) {
+ CHECK(message[j] == 0);
+ }
+ CHECK(mlen <= 4096);
+ CHECK(memcmp(blindout, blind, 32) == 0);
+ CHECK(vout == v);
+ CHECK(minv <= v);
+ CHECK(maxv >= v);
+ len = 5134;
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, v, &commit, blind, commit.data, -1, 64, v, NULL, 0, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(len <= 73);
+ CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit.data, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(memcmp(blindout, blind, 32) == 0);
+ CHECK(vout == v);
+ CHECK(minv == v);
+ CHECK(maxv == v);
+
+ /* Check with a committed message */
+ len = 5134;
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, v, &commit, blind, commit.data, -1, 64, v, NULL, 0, message_short, sizeof(message_short), &secp256k1_generator_const_h));
+ CHECK(len <= 73);
+ CHECK(!secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit.data, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(!secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit.data, &minv, &maxv, &commit, proof, len, message_long, sizeof(message_long), &secp256k1_generator_const_h));
+ CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit.data, &minv, &maxv, &commit, proof, len, message_short, sizeof(message_short), &secp256k1_generator_const_h));
+ CHECK(memcmp(blindout, blind, 32) == 0);
+ CHECK(vout == v);
+ CHECK(minv == v);
+ CHECK(maxv == v);
+ }
+ }
+ secp256k1_rand256(blind);
+ v = INT64_MAX - 1;
+ CHECK(secp256k1_pedersen_commit(ctx, &commit, blind, v, &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ for (i = 0; i < 19; i++) {
+ len = 5134;
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, 0, &commit, blind, commit.data, i, 0, v, NULL, 0, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(len <= 5134);
+ CHECK(minv <= v);
+ CHECK(maxv >= v);
+ /* Make sure it fails when validating with a committed message */
+ CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit, proof, len, message_short, sizeof(message_short), &secp256k1_generator_const_h));
+ }
+ secp256k1_rand256(blind);
+ {
+ /*Malleability test.*/
+ v = secp256k1_rands64(0, 255);
+ CHECK(secp256k1_pedersen_commit(ctx, &commit, blind, v, &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ len = 5134;
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, 0, &commit, blind, commit.data, 0, 3, v, NULL, 0, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(len <= 5134);
+ /* Test if trailing bytes are rejected. */
+ proof[len] = v;
+ CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit, proof, len + 1, NULL, 0, &secp256k1_generator_const_h));
+ for (i = 0; i < len*8; i++) {
+ proof[i >> 3] ^= 1 << (i & 7);
+ CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ proof[i >> 3] ^= 1 << (i & 7);
+ }
+ CHECK(secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(minv <= v);
+ CHECK(maxv >= v);
+ }
+ memcpy(&commit2, &commit, sizeof(commit));
+ for (i = 0; i < 10 * (size_t) count; i++) {
+ int exp;
+ int min_bits;
+ v = secp256k1_rands64(0, UINT64_MAX >> (secp256k1_rand32()&63));
+ vmin = 0;
+ if ((v < INT64_MAX) && (secp256k1_rand32()&1)) {
+ vmin = secp256k1_rands64(0, v);
+ }
+ secp256k1_rand256(blind);
+ CHECK(secp256k1_pedersen_commit(ctx, &commit, blind, v, &secp256k1_generator_const_h, &secp256k1_generator_const_g));
+ len = 5134;
+ exp = (int)secp256k1_rands64(0,18)-(int)secp256k1_rands64(0,18);
+ if (exp < 0) {
+ exp = -exp;
+ }
+ min_bits = (int)secp256k1_rands64(0,64)-(int)secp256k1_rands64(0,64);
+ if (min_bits < 0) {
+ min_bits = -min_bits;
+ }
+ CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, vmin, &commit, blind, commit.data, exp, min_bits, v, NULL, 0, NULL, 0, &secp256k1_generator_const_h));
+ CHECK(len <= 5134);
+ mlen = 4096;
+ CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, message, &mlen, commit.data, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ for (j = 0; j < mlen; j++) {
+ CHECK(message[j] == 0);
+ }
+ CHECK(mlen <= 4096);
+ CHECK(memcmp(blindout, blind, 32) == 0);
+ CHECK(vout == v);
+ CHECK(minv <= v);
+ CHECK(maxv >= v);
+ CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit.data, &minv, &maxv, &commit, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ memcpy(&commit2, &commit, sizeof(commit));
+ }
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < 96; i++) {
+ secp256k1_rand256(&proof[i * 32]);
+ }
+ for (k = 0; k < 128; k++) {
+ len = k;
+ CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit2, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ }
+ len = secp256k1_rands64(0, 3072);
+ CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, &commit2, proof, len, NULL, 0, &secp256k1_generator_const_h));
+ }
+}
+
+void run_rangeproof_tests(void) {
+ int i;
+ test_api();
+ for (i = 0; i < 10*count; i++) {
+ test_borromean();
+ }
+ test_rangeproof();
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
new file mode 100644
index 0000000..bf23c26
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
@@ -0,0 +1,8 @@
+include_HEADERS += include/secp256k1_recovery.h
+noinst_HEADERS += src/modules/recovery/main_impl.h
+noinst_HEADERS += src/modules/recovery/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_recover
+bench_recover_SOURCES = src/bench_recover.c
+bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
new file mode 100644
index 0000000..2f6691c
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
@@ -0,0 +1,193 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
+#define SECP256K1_MODULE_RECOVERY_MAIN_H
+
+#include "include/secp256k1_recovery.h"
+
+static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ (void)ctx;
+ if (sizeof(secp256k1_scalar) == 32) {
+ /* When the secp256k1_scalar type is exactly 32 byte, use its
+ * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
+ * Note that secp256k1_ecdsa_signature_save must use the same representation. */
+ memcpy(r, &sig->data[0], 32);
+ memcpy(s, &sig->data[32], 32);
+ } else {
+ secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
+ secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
+ }
+ *recid = sig->data[64];
+}
+
+static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) {
+ if (sizeof(secp256k1_scalar) == 32) {
+ memcpy(&sig->data[0], r, 32);
+ memcpy(&sig->data[32], s, 32);
+ } else {
+ secp256k1_scalar_get_b32(&sig->data[0], r);
+ secp256k1_scalar_get_b32(&sig->data[32], s);
+ }
+ sig->data[64] = recid;
+}
+
+int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
+ secp256k1_scalar r, s;
+ int ret = 1;
+ int overflow = 0;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input64 != NULL);
+ ARG_CHECK(recid >= 0 && recid <= 3);
+
+ secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
+ ret &= !overflow;
+ secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
+ ret &= !overflow;
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ secp256k1_scalar r, s;
+
+ (void)ctx;
+ ARG_CHECK(output64 != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(recid != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
+ secp256k1_scalar_get_b32(&output64[0], &r);
+ secp256k1_scalar_get_b32(&output64[32], &s);
+ return 1;
+}
+
+int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) {
+ secp256k1_scalar r, s;
+ int recid;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(sigin != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+}
+
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) {
+ unsigned char brx[32];
+ secp256k1_fe fx;
+ secp256k1_ge x;
+ secp256k1_gej xj;
+ secp256k1_scalar rn, u1, u2;
+ secp256k1_gej qj;
+ int r;
+
+ if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
+ return 0;
+ }
+
+ secp256k1_scalar_get_b32(brx, sigr);
+ r = secp256k1_fe_set_b32(&fx, brx);
+ (void)r;
+ VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
+ if (recid & 2) {
+ if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
+ return 0;
+ }
+ secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
+ }
+ if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
+ return 0;
+ }
+ secp256k1_gej_set_ge(&xj, &x);
+ secp256k1_scalar_inverse_var(&rn, sigr);
+ secp256k1_scalar_mul(&u1, &rn, message);
+ secp256k1_scalar_negate(&u1, &u1);
+ secp256k1_scalar_mul(&u2, &rn, sigs);
+ secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
+ secp256k1_ge_set_gej_var(pubkey, &qj);
+ return !secp256k1_gej_is_infinity(&qj);
+}
+
+int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar r, s;
+ secp256k1_scalar sec, non, msg;
+ int recid;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ /* Fail if the secret key is invalid. */
+ if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned char nonce32[32];
+ unsigned int count = 0;
+ secp256k1_scalar_set_b32(&msg, msg32, NULL);
+ while (1) {
+ ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ if (!secp256k1_scalar_is_zero(&non) && !overflow) {
+ if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
+ break;
+ }
+ }
+ count++;
+ }
+ memset(nonce32, 0, 32);
+ secp256k1_scalar_clear(&msg);
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ }
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
+ } else {
+ memset(signature, 0, sizeof(*signature));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
+ secp256k1_ge q;
+ secp256k1_scalar r, s;
+ secp256k1_scalar m;
+ int recid;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
+ VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
+ secp256k1_scalar_set_b32(&m, msg32, NULL);
+ if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
+ secp256k1_pubkey_save(pubkey, &q);
+ return 1;
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ return 0;
+ }
+}
+
+#endif /* SECP256K1_MODULE_RECOVERY_MAIN_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
new file mode 100644
index 0000000..5c9bbe8
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
@@ -0,0 +1,393 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RECOVERY_TESTS_H
+#define SECP256K1_MODULE_RECOVERY_TESTS_H
+
+static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ (void) msg32;
+ (void) key32;
+ (void) algo16;
+ (void) data;
+
+ /* On the first run, return 0 to force a second run */
+ if (counter == 0) {
+ memset(nonce32, 0, 32);
+ return 1;
+ }
+ /* On the second run, return an overflow to force a third run */
+ if (counter == 1) {
+ memset(nonce32, 0xff, 32);
+ return 1;
+ }
+ /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
+ memset(nonce32, 1, 32);
+ return secp256k1_rand_bits(1);
+}
+
+void test_ecdsa_recovery_api(void) {
+ /* Setup contexts that just count errors */
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey recpubkey;
+ secp256k1_ecdsa_signature normal_sig;
+ secp256k1_ecdsa_recoverable_signature recsig;
+ unsigned char privkey[32] = { 1 };
+ unsigned char message[32] = { 2 };
+ int32_t ecount = 0;
+ int recid = 0;
+ unsigned char sig[74];
+ unsigned char zero_privkey[32] = { 0 };
+ unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Check bad contexts and NULLs for signing */
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
+ CHECK(ecount == 5);
+ /* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */
+ secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
+ CHECK(ecount == 5);
+ /* These will all fail, but not in ARG_CHECK way */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
+ /* This one will succeed. */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 5);
+
+ /* Check signing with a goofy nonce function */
+
+ /* Check bad contexts and NULLs for recovery */
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
+ CHECK(ecount == 5);
+
+ /* Check NULLs for conversion */
+ CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
+
+ /* Check NULLs for de/serialization */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
+
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
+ CHECK(ecount == 7);
+ /* overflow in signature will fail but not affect ecount */
+ memcpy(sig, over_privkey, 32);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
+ CHECK(ecount == 7);
+
+ /* cleanup */
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+void test_ecdsa_recovery_end_to_end(void) {
+ unsigned char extra[32] = {0x00};
+ unsigned char privkey[32];
+ unsigned char message[32];
+ secp256k1_ecdsa_signature signature[5];
+ secp256k1_ecdsa_recoverable_signature rsignature[5];
+ unsigned char sig[74];
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey recpubkey;
+ int recid = 0;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar msg, key;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_scalar_get_b32(message, &msg);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Serialize/parse compact and verify/recover. */
+ extra[0] = 0;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
+ extra[31] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
+ extra[31] = 0;
+ extra[0] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ memset(&rsignature[4], 0, sizeof(rsignature[4]));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ /* Parse compact (with recovery id) and recover. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
+ CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ /* Serialize/destroy/parse signature and verify again. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+ sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
+ /* Recover again */
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
+ memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+}
+
+/* Tests several edge cases. */
+void test_ecdsa_recovery_edge_cases(void) {
+ const unsigned char msg32[32] = {
+ 'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
+ 'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
+ 'e', 'c', 'r', 'e', 't', ' ', 'm', 'e',
+ 's', 's', 'a', 'g', 'e', '.', '.', '.'
+ };
+ const unsigned char sig64[64] = {
+ /* Generated by signing the above message with nonce 'This is the nonce we will use...'
+ * and secret key 0 (which is not valid), resulting in recid 0. */
+ 0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
+ 0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
+ 0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
+ 0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32,
+ 0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E,
+ 0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD,
+ 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
+ 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
+ };
+ secp256k1_pubkey pubkey;
+ /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
+ const unsigned char sigb64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ secp256k1_pubkey pubkeyb;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ int recid;
+
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+
+ for (recid = 0; recid < 4; recid++) {
+ int i;
+ int recid2;
+ /* (4,4) encoded in DER. */
+ unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
+ unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01};
+ unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00};
+ unsigned char sigbderalt1[39] = {
+ 0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt2[39] = {
+ 0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ unsigned char sigbderalt3[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt4[40] = {
+ 0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ /* (order + r,4) encoded in DER. */
+ unsigned char sigbderlong[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
+ 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
+ 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
+ };
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ for (recid2 = 0; recid2 < 4; recid2++) {
+ secp256k1_pubkey pubkey2b;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
+ /* Verifying with (order + r,4) should always fail. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ }
+ /* DER parsing tests. */
+ /* Zero length r/s. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
+ /* Leading zeros. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
+ sigbderalt3[4] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ sigbderalt4[7] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ /* Damage signature. */
+ sigbder[7]++;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ sigbder[7]--;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
+ for(i = 0; i < 8; i++) {
+ int c;
+ unsigned char orig = sigbder[i];
+ /*Try every single-byte change.*/
+ for (c = 0; c < 256; c++) {
+ if (c == orig ) {
+ continue;
+ }
+ sigbder[i] = c;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ }
+ sigbder[i] = orig;
+ }
+ }
+
+ /* Test r/s equal to zero */
+ {
+ /* (1,1) encoded in DER. */
+ unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
+ unsigned char sigc64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ secp256k1_pubkey pubkeyc;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
+ sigcder[4] = 0;
+ sigc64[31] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ sigcder[4] = 1;
+ sigcder[7] = 0;
+ sigc64[31] = 1;
+ sigc64[63] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ }
+}
+
+void run_recovery_tests(void) {
+ int i;
+ for (i = 0; i < count; i++) {
+ test_ecdsa_recovery_api();
+ }
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_recovery_end_to_end();
+ }
+ test_ecdsa_recovery_edge_cases();
+}
+
+#endif /* SECP256K1_MODULE_RECOVERY_TESTS_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/Makefile.am.include
new file mode 100644
index 0000000..51ece21
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/Makefile.am.include
@@ -0,0 +1,6 @@
+include_HEADERS += include/secp256k1_surjectionproof.h
+noinst_HEADERS += src/modules/surjection/main_impl.h
+noinst_HEADERS += src/modules/surjection/surjection.h
+noinst_HEADERS += src/modules/surjection/surjection_impl.h
+noinst_HEADERS += src/modules/surjection/tests_impl.h
+
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/main_impl.h
new file mode 100644
index 0000000..c67d4c0
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/main_impl.h
@@ -0,0 +1,338 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+#ifndef SECP256K1_MODULE_SURJECTION_MAIN
+#define SECP256K1_MODULE_SURJECTION_MAIN
+
+#include <assert.h>
+#include <string.h>
+
+#include "modules/rangeproof/borromean.h"
+#include "modules/surjection/surjection_impl.h"
+#include "hash.h"
+#include "include/secp256k1_rangeproof.h"
+#include "include/secp256k1_surjectionproof.h"
+
+static size_t secp256k1_count_bits_set(const unsigned char* data, size_t count) {
+ size_t ret = 0;
+ size_t i;
+ for (i = 0; i < count; i++) {
+#ifdef HAVE_BUILTIN_POPCOUNT
+ ret += __builtin_popcount(data[i]);
+#else
+ ret += !!(data[i] & 0x1);
+ ret += !!(data[i] & 0x2);
+ ret += !!(data[i] & 0x4);
+ ret += !!(data[i] & 0x8);
+ ret += !!(data[i] & 0x10);
+ ret += !!(data[i] & 0x20);
+ ret += !!(data[i] & 0x40);
+ ret += !!(data[i] & 0x80);
+#endif
+ }
+ return ret;
+}
+
+int secp256k1_surjectionproof_parse(const secp256k1_context* ctx, secp256k1_surjectionproof *proof, const unsigned char *input, size_t inputlen) {
+ size_t n_inputs;
+ size_t signature_len;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(input != NULL);
+ (void) ctx;
+
+ if (inputlen < 2) {
+ return 0;
+ }
+ n_inputs = ((size_t) (input[1] << 8)) + input[0];
+ if (n_inputs > SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS) {
+ return 0;
+ }
+ if (inputlen < 2 + (n_inputs + 7) / 8) {
+ return 0;
+ }
+
+ signature_len = 32 * (1 + secp256k1_count_bits_set(&input[2], (n_inputs + 7) / 8));
+ if (inputlen != 2 + (n_inputs + 7) / 8 + signature_len) {
+ return 0;
+ }
+ proof->n_inputs = n_inputs;
+ memcpy(proof->used_inputs, &input[2], (n_inputs + 7) / 8);
+ memcpy(proof->data, &input[2 + (n_inputs + 7) / 8], signature_len);
+
+ return 1;
+}
+
+int secp256k1_surjectionproof_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_surjectionproof *proof) {
+ size_t signature_len;
+ size_t serialized_len;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(outputlen != NULL);
+ ARG_CHECK(proof != NULL);
+ (void) ctx;
+
+ signature_len = 32 * (1 + secp256k1_count_bits_set(proof->used_inputs, (proof->n_inputs + 7) / 8));
+ serialized_len = 2 + (proof->n_inputs + 7) / 8 + signature_len;
+ if (*outputlen < serialized_len) {
+ return 0;
+ }
+
+ output[0] = proof->n_inputs % 0x100;
+ output[1] = proof->n_inputs / 0x100;
+ memcpy(&output[2], proof->used_inputs, (proof->n_inputs + 7) / 8);
+ memcpy(&output[2 + (proof->n_inputs + 7) / 8], proof->data, signature_len);
+ *outputlen = serialized_len;
+
+ return 1;
+}
+
+size_t secp256k1_surjectionproof_n_total_inputs(const secp256k1_context* ctx, const secp256k1_surjectionproof* proof) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ (void) ctx;
+ return proof->n_inputs;
+}
+
+size_t secp256k1_surjectionproof_n_used_inputs(const secp256k1_context* ctx, const secp256k1_surjectionproof* proof) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ (void) ctx;
+ return secp256k1_count_bits_set(proof->used_inputs, (proof->n_inputs + 7) / 8);
+}
+
+size_t secp256k1_surjectionproof_serialized_size(const secp256k1_context* ctx, const secp256k1_surjectionproof* proof) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ return 2 + (proof->n_inputs + 7) / 8 + 32 * (1 + secp256k1_surjectionproof_n_used_inputs(ctx, proof));
+}
+
+typedef struct {
+ unsigned char state[32];
+ size_t state_i;
+} secp256k1_surjectionproof_csprng;
+
+static void secp256k1_surjectionproof_csprng_init(secp256k1_surjectionproof_csprng *csprng, const unsigned char* state) {
+ memcpy(csprng->state, state, 32);
+ csprng->state_i = 0;
+}
+
+static size_t secp256k1_surjectionproof_csprng_next(secp256k1_surjectionproof_csprng *csprng, size_t rand_max) {
+ /* The number of random bytes to read for each random sample */
+ const size_t increment = rand_max > 256 ? 2 : 1;
+ /* The maximum value expressable by the number of random bytes we read */
+ const size_t selection_range = rand_max > 256 ? 0xffff : 0xff;
+ /* The largest multiple of rand_max that fits within selection_range */
+ const size_t limit = ((selection_range + 1) / rand_max) * rand_max;
+
+ while (1) {
+ size_t val;
+ if (csprng->state_i + increment >= 32) {
+ secp256k1_sha256 sha;
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, csprng->state, 32);
+ secp256k1_sha256_finalize(&sha, csprng->state);
+ csprng->state_i = 0;
+ }
+ val = csprng->state[csprng->state_i];
+ if (increment > 1) {
+ val = (val << 8) + csprng->state[csprng->state_i + 1];
+ }
+ csprng->state_i += increment;
+ /* Accept only values below our limit. Values equal to or above the limit are
+ * biased because they comprise only a subset of the range (0, rand_max - 1) */
+ if (val < limit) {
+ return val % rand_max;
+ }
+ }
+}
+
+int secp256k1_surjectionproof_initialize(const secp256k1_context* ctx, secp256k1_surjectionproof* proof, size_t *input_index, const secp256k1_fixed_asset_tag* fixed_input_tags, const size_t n_input_tags, const size_t n_input_tags_to_use, const secp256k1_fixed_asset_tag* fixed_output_tag, const size_t n_max_iterations, const unsigned char *random_seed32) {
+ secp256k1_surjectionproof_csprng csprng;
+ size_t n_iterations = 0;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(input_index != NULL);
+ ARG_CHECK(fixed_input_tags != NULL);
+ ARG_CHECK(fixed_output_tag != NULL);
+ ARG_CHECK(random_seed32 != NULL);
+ ARG_CHECK(n_input_tags <= SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS);
+ ARG_CHECK(n_input_tags_to_use <= n_input_tags);
+ (void) ctx;
+
+ secp256k1_surjectionproof_csprng_init(&csprng, random_seed32);
+ memset(proof->data, 0, sizeof(proof->data));
+ proof->n_inputs = n_input_tags;
+
+ while (1) {
+ int has_output_tag = 0;
+ size_t i;
+
+ /* obtain a random set of indices */
+ memset(proof->used_inputs, 0, sizeof(proof->used_inputs));
+ for (i = 0; i < n_input_tags_to_use; i++) {
+ while (1) {
+ size_t next_input_index;
+ next_input_index = secp256k1_surjectionproof_csprng_next(&csprng, n_input_tags);
+ if (memcmp(&fixed_input_tags[next_input_index], fixed_output_tag, sizeof(*fixed_output_tag)) == 0) {
+ *input_index = next_input_index;
+ has_output_tag = 1;
+ }
+
+ if (!(proof->used_inputs[next_input_index / 8] & (1 << (next_input_index % 8)))) {
+ proof->used_inputs[next_input_index / 8] |= (1 << (next_input_index % 8));
+ break;
+ }
+ }
+ }
+
+ /* Check if we succeeded */
+ n_iterations++;
+ if (has_output_tag) {
+#ifdef VERIFY
+ proof->initialized = 1;
+#endif
+ return n_iterations;
+ }
+ if (n_iterations >= n_max_iterations) {
+#ifdef VERIFY
+ proof->initialized = 0;
+#endif
+ return 0;
+ }
+ }
+}
+
+int secp256k1_surjectionproof_generate(const secp256k1_context* ctx, secp256k1_surjectionproof* proof, const secp256k1_generator* ephemeral_input_tags, size_t n_ephemeral_input_tags, const secp256k1_generator* ephemeral_output_tag, size_t input_index, const unsigned char *input_blinding_key, const unsigned char *output_blinding_key) {
+ secp256k1_scalar blinding_key;
+ secp256k1_scalar tmps;
+ secp256k1_scalar nonce;
+ int overflow = 0;
+ size_t rsizes[1]; /* array needed for borromean sig API */
+ size_t indices[1]; /* array needed for borromean sig API */
+ size_t i;
+ size_t n_total_pubkeys;
+ size_t n_used_pubkeys;
+ size_t ring_input_index = 0;
+ secp256k1_gej ring_pubkeys[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_scalar borromean_s[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_ge inputs[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_ge output;
+ unsigned char msg32[32];
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(ephemeral_input_tags != NULL);
+ ARG_CHECK(ephemeral_output_tag != NULL);
+ ARG_CHECK(input_blinding_key != NULL);
+ ARG_CHECK(output_blinding_key != NULL);
+#ifdef VERIFY
+ CHECK(proof->initialized == 1);
+#endif
+
+ /* Compute secret key */
+ secp256k1_scalar_set_b32(&tmps, input_blinding_key, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ secp256k1_scalar_set_b32(&blinding_key, output_blinding_key, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ /* The only time the input may equal the output is if neither one was blinded in the first place,
+ * i.e. both blinding keys are zero. Otherwise this is a privacy leak. */
+ if (secp256k1_scalar_eq(&tmps, &blinding_key) && !secp256k1_scalar_is_zero(&blinding_key)) {
+ return 0;
+ }
+ secp256k1_scalar_negate(&tmps, &tmps);
+ secp256k1_scalar_add(&blinding_key, &blinding_key, &tmps);
+
+ /* Compute public keys */
+ n_total_pubkeys = secp256k1_surjectionproof_n_total_inputs(ctx, proof);
+ n_used_pubkeys = secp256k1_surjectionproof_n_used_inputs(ctx, proof);
+ if (n_used_pubkeys > n_total_pubkeys || n_total_pubkeys != n_ephemeral_input_tags) {
+ return 0;
+ }
+
+ secp256k1_generator_load(&output, ephemeral_output_tag);
+ for (i = 0; i < n_total_pubkeys; i++) {
+ secp256k1_generator_load(&inputs[i], &ephemeral_input_tags[i]);
+ }
+
+ secp256k1_surjection_compute_public_keys(ring_pubkeys, n_used_pubkeys, inputs, n_total_pubkeys, proof->used_inputs, &output, input_index, &ring_input_index);
+
+ /* Produce signature */
+ rsizes[0] = (int) n_used_pubkeys;
+ indices[0] = (int) ring_input_index;
+ secp256k1_surjection_genmessage(msg32, inputs, n_total_pubkeys, &output);
+ if (secp256k1_surjection_genrand(borromean_s, n_used_pubkeys, &blinding_key) == 0) {
+ return 0;
+ }
+ /* Borromean sign will overwrite one of the s values we just generated, so use
+ * it as a nonce instead. This avoids extra random generation and also is an
+ * homage to the rangeproof code which does this very cleverly to encode messages. */
+ nonce = borromean_s[ring_input_index];
+ secp256k1_scalar_clear(&borromean_s[ring_input_index]);
+ if (secp256k1_borromean_sign(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, &proof->data[0], borromean_s, ring_pubkeys, &nonce, &blinding_key, rsizes, indices, 1, msg32, 32) == 0) {
+ return 0;
+ }
+ for (i = 0; i < n_used_pubkeys; i++) {
+ secp256k1_scalar_get_b32(&proof->data[32 + 32 * i], &borromean_s[i]);
+ }
+ return 1;
+}
+
+int secp256k1_surjectionproof_verify(const secp256k1_context* ctx, const secp256k1_surjectionproof* proof, const secp256k1_generator* ephemeral_input_tags, size_t n_ephemeral_input_tags, const secp256k1_generator* ephemeral_output_tag) {
+ size_t rsizes[1]; /* array needed for borromean sig API */
+ size_t i;
+ size_t n_total_pubkeys;
+ size_t n_used_pubkeys;
+ secp256k1_gej ring_pubkeys[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_scalar borromean_s[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_ge inputs[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS];
+ secp256k1_ge output;
+ unsigned char msg32[32];
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(proof != NULL);
+ ARG_CHECK(ephemeral_input_tags != NULL);
+ ARG_CHECK(ephemeral_output_tag != NULL);
+
+ /* Compute public keys */
+ n_total_pubkeys = secp256k1_surjectionproof_n_total_inputs(ctx, proof);
+ n_used_pubkeys = secp256k1_surjectionproof_n_used_inputs(ctx, proof);
+ if (n_used_pubkeys == 0 || n_used_pubkeys > n_total_pubkeys || n_total_pubkeys != n_ephemeral_input_tags) {
+ return 0;
+ }
+
+ secp256k1_generator_load(&output, ephemeral_output_tag);
+ for (i = 0; i < n_total_pubkeys; i++) {
+ secp256k1_generator_load(&inputs[i], &ephemeral_input_tags[i]);
+ }
+
+ if (secp256k1_surjection_compute_public_keys(ring_pubkeys, n_used_pubkeys, inputs, n_total_pubkeys, proof->used_inputs, &output, 0, NULL) == 0) {
+ return 0;
+ }
+
+ /* Verify signature */
+ rsizes[0] = (int) n_used_pubkeys;
+ for (i = 0; i < n_used_pubkeys; i++) {
+ int overflow = 0;
+ secp256k1_scalar_set_b32(&borromean_s[i], &proof->data[32 + 32 * i], &overflow);
+ if (overflow == 1) {
+ return 0;
+ }
+ }
+ secp256k1_surjection_genmessage(msg32, inputs, n_total_pubkeys, &output);
+ return secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, &proof->data[0], borromean_s, ring_pubkeys, rsizes, 1, msg32, 32);
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.h
new file mode 100644
index 0000000..20ac493
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SURJECTION_H_
+#define _SECP256K1_SURJECTION_H_
+
+#include "group.h"
+#include "scalar.h"
+
+SECP256K1_INLINE static void secp256k1_surjection_genmessage(unsigned char *msg32, secp256k1_ge *ephemeral_input_tags, size_t n_input_tags, secp256k1_ge *ephemeral_output_tag);
+
+SECP256K1_INLINE static int secp256k1_surjection_genrand(secp256k1_scalar *s, size_t ns, const secp256k1_scalar *blinding_key);
+
+SECP256K1_INLINE static int secp256k1_surjection_compute_public_keys(secp256k1_gej *pubkeys, size_t n_pubkeys, const secp256k1_ge *input_tags, size_t n_input_tags, const unsigned char *used_tags, const secp256k1_ge *output_tag, size_t input_index, size_t *ring_input_index);
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.md b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.md
new file mode 100644
index 0000000..e7bd4db
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection.md
@@ -0,0 +1,108 @@
+Surjection Proof Module
+===========================
+
+This module implements a scheme by which a given point can be proven to be
+equal to one of a set of points, plus a known difference. This is used in
+Confidential Assets when reblinding "asset commitments", which are NUMS
+points, to prove that the underlying NUMS point does not change during
+reblinding.
+
+Assets are represented, in general, by a 32-byte seed (a hash of some
+transaction data) which is hashed to form a NUMS generator, which appears
+on the blockchain only in blinded form. We refer to the seed as an
+"asset ID" and the blinded generator as an "(ephemeral) asset commitment".
+These asset commitments are unique per-output, and their NUMS components
+are in general known only to the holder of the output.
+
+The result is that within a transaction, all outputs are able to have
+a new uniformly-random asset commitment which cannot be associated with
+any individual input asset id, but verifiers are nonetheless assured that
+all assets coming out of a transaction are ones that went in.
+
+### Terminology
+
+Assets are identified by a 32-byte "asset ID". In this library these IDs
+are used as input to a point-valued hash function `H`. We usually refer
+to the hash output as `A`, since this output is the only thing that appears
+in the algebra.
+
+Then transaction outputs have "asset commitments", which are curvepoints
+of the form `A + rG`, where `A` is the hash of the asset ID and `r` is
+some random "blinding factor".
+
+### Design Rationale
+
+Confidential Assets essentially works by replacing the second NUMS generator
+`H` in Confidential Transactions with a per-asset unique NUMS generator. This
+allows the same verification equation (the sum of all blinded inputs must
+equal the sum of all blinded outputs) to imply that quantity of *every* asset
+type is preserved in each transaction.
+
+It turns out that even if outputs are reblinded by the addition of `rG` for
+some known `r`, this verification equation has the same meaning, with one
+caveat: verifiers must be assured that the reblinding preserves the original
+generators (and does not, for example, negate them).
+
+This assurance is what surjection proofs provide.
+
+### Limitations
+
+The naive scheme works as follows: every output asset is shown to have come
+from some input asset. However, the proofs scale with the number of input
+assets, so for all outputs the total size of all surjection proofs is `O(mn)`
+for `m`, `n` the number of inputs and outputs.
+
+We therefore restrict the number of inputs that each output may have come
+from to 3 (well, some fixed number, which is passed into the API), which
+provides a weaker form of blinding, but gives `O(n)` scaling. Over many
+transactions, the privacy afforded by this increases exponentially.
+
+### Our Scheme
+
+Our scheme works as follows. Proofs are generated in two steps, "initialization"
+which selects a subset of inputs and "generation" which does the mathematical
+part of proof generation.
+
+Every input has an asset commitment for which we know the blinding key and
+underlying asset ID.
+
+#### Initialization
+
+The initialization function takes a list of input asset IDs and one output
+asset ID. It chooses an input subset of some fixed size repeatedly until it
+the output ID appears at least once in its subset.
+
+It stores a bitmap representing this subset in the proof object and returns
+the number of iterations it needed to choose the subset. The reciprocal of
+this represents the probability that a uniformly random input-output
+mapping would correspond to the actual input-output mapping, and therefore
+gives a measure of privacy. (Lower iteration counts are better.)
+
+It also informs the caller the index of the input whose ID matches the output.
+
+As the API works on only a single output at a time, the total probability
+should be computed by multiplying together the counts for each output.
+
+#### Generation
+
+The generation function takes a list of input asset commitments, an output
+asset commitment, the input index returned by the initialization step, and
+blinding keys for (a) the output commitment, (b) the input commitment. Here
+"the input commitment" refers specifically to the input whose index was
+chosen during initialization.
+
+Next, it computes a ring signature over the differences between the output
+commitment and every input commitment chosen during initialization. Since
+the discrete log of one of these differences is the difference between the
+output and input blinding keys, it is possible to create a ring signature
+over these differences using that value as the secret key. We create such a
+signature, which completes the proof.
+
+#### Verification
+
+Verification takes a surjection proof object, a list of input commitments,
+and an output commitment. The proof object contains a ring signature and
+a bitmap describing which input commitments to use, and verification
+succeeds iff the signature verifies.
+
+
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection_impl.h
new file mode 100644
index 0000000..f58026d
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/surjection_impl.h
@@ -0,0 +1,86 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SURJECTION_IMPL_H_
+#define _SECP256K1_SURJECTION_IMPL_H_
+
+#include <assert.h>
+#include <string.h>
+
+#include "eckey.h"
+#include "group.h"
+#include "scalar.h"
+#include "hash.h"
+
+SECP256K1_INLINE static void secp256k1_surjection_genmessage(unsigned char *msg32, secp256k1_ge *ephemeral_input_tags, size_t n_input_tags, secp256k1_ge *ephemeral_output_tag) {
+ /* compute message */
+ size_t i;
+ unsigned char pk_ser[33];
+ size_t pk_len = sizeof(pk_ser);
+ secp256k1_sha256 sha256_en;
+
+ secp256k1_sha256_initialize(&sha256_en);
+ for (i = 0; i < n_input_tags; i++) {
+ secp256k1_eckey_pubkey_serialize(&ephemeral_input_tags[i], pk_ser, &pk_len, 1);
+ assert(pk_len == sizeof(pk_ser));
+ secp256k1_sha256_write(&sha256_en, pk_ser, pk_len);
+ }
+ secp256k1_eckey_pubkey_serialize(ephemeral_output_tag, pk_ser, &pk_len, 1);
+ assert(pk_len == sizeof(pk_ser));
+ secp256k1_sha256_write(&sha256_en, pk_ser, pk_len);
+ secp256k1_sha256_finalize(&sha256_en, msg32);
+}
+
+SECP256K1_INLINE static int secp256k1_surjection_genrand(secp256k1_scalar *s, size_t ns, const secp256k1_scalar *blinding_key) {
+ size_t i;
+ unsigned char sec_input[36];
+ secp256k1_sha256 sha256_en;
+
+ /* compute s values */
+ secp256k1_scalar_get_b32(&sec_input[4], blinding_key);
+ for (i = 0; i < ns; i++) {
+ int overflow = 0;
+ sec_input[0] = i;
+ sec_input[1] = i >> 8;
+ sec_input[2] = i >> 16;
+ sec_input[3] = i >> 24;
+
+ secp256k1_sha256_initialize(&sha256_en);
+ secp256k1_sha256_write(&sha256_en, sec_input, 36);
+ secp256k1_sha256_finalize(&sha256_en, sec_input);
+ secp256k1_scalar_set_b32(&s[i], sec_input, &overflow);
+ if (overflow == 1) {
+ memset(sec_input, 0, 32);
+ return 0;
+ }
+ }
+ memset(sec_input, 0, 32);
+ return 1;
+}
+
+SECP256K1_INLINE static int secp256k1_surjection_compute_public_keys(secp256k1_gej *pubkeys, size_t n_pubkeys, const secp256k1_ge *input_tags, size_t n_input_tags, const unsigned char *used_tags, const secp256k1_ge *output_tag, size_t input_index, size_t *ring_input_index) {
+ size_t i;
+ size_t j = 0;
+ for (i = 0; i < n_input_tags; i++) {
+ if (used_tags[i / 8] & (1 << (i % 8))) {
+ secp256k1_ge tmpge;
+ secp256k1_ge_neg(&tmpge, &input_tags[i]);
+ secp256k1_gej_set_ge(&pubkeys[j], &tmpge);
+ secp256k1_gej_add_ge_var(&pubkeys[j], &pubkeys[j], output_tag, NULL);
+ if (ring_input_index != NULL && input_index == i) {
+ *ring_input_index = j;
+ }
+ j++;
+ if (j > n_pubkeys) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/tests_impl.h
new file mode 100644
index 0000000..a0856e2
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/surjection/tests_impl.h
@@ -0,0 +1,494 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_SURJECTIONPROOF_TESTS
+#define SECP256K1_MODULE_SURJECTIONPROOF_TESTS
+
+#include "testrand.h"
+#include "group.h"
+#include "include/secp256k1_generator.h"
+#include "include/secp256k1_rangeproof.h"
+#include "include/secp256k1_surjectionproof.h"
+
+static void test_surjectionproof_api(void) {
+ unsigned char seed[32];
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_fixed_asset_tag fixed_input_tags[10];
+ secp256k1_fixed_asset_tag fixed_output_tag;
+ secp256k1_generator ephemeral_input_tags[10];
+ secp256k1_generator ephemeral_output_tag;
+ unsigned char input_blinding_key[10][32];
+ unsigned char output_blinding_key[32];
+ unsigned char serialized_proof[SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX];
+ size_t serialized_len;
+ secp256k1_surjectionproof proof;
+ size_t n_inputs = sizeof(fixed_input_tags) / sizeof(fixed_input_tags[0]);
+ size_t input_index;
+ int32_t ecount = 0;
+ size_t i;
+
+ secp256k1_rand256(seed);
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ for (i = 0; i < n_inputs; i++) {
+ secp256k1_rand256(input_blinding_key[i]);
+ secp256k1_rand256(fixed_input_tags[i].data);
+ CHECK(secp256k1_generator_generate_blinded(ctx, &ephemeral_input_tags[i], fixed_input_tags[i].data, input_blinding_key[i]));
+ }
+ secp256k1_rand256(output_blinding_key);
+ memcpy(&fixed_output_tag, &fixed_input_tags[0], sizeof(fixed_input_tags[0]));
+ CHECK(secp256k1_generator_generate_blinded(ctx, &ephemeral_output_tag, fixed_output_tag.data, output_blinding_key));
+
+ /* check initialize */
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 0, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 3, &fixed_input_tags[0], 100, seed) != 0);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_surjectionproof_initialize(none, NULL, &input_index, fixed_input_tags, n_inputs, 3, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, NULL, fixed_input_tags, n_inputs, 3, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, NULL, n_inputs, 3, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS + 1, 3, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, n_inputs, &fixed_input_tags[0], 100, seed) != 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, n_inputs + 1, &fixed_input_tags[0], 100, seed) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 3, NULL, 100, seed) == 0);
+ CHECK(ecount == 6);
+ CHECK((secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 0, &fixed_input_tags[0], 0, seed) & 1) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 0, &fixed_input_tags[0], 100, NULL) == 0);
+ CHECK(ecount == 7);
+
+ CHECK(secp256k1_surjectionproof_initialize(none, &proof, &input_index, fixed_input_tags, n_inputs, 3, &fixed_input_tags[0], 100, seed) != 0);
+ /* check generate */
+ CHECK(secp256k1_surjectionproof_generate(none, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 8);
+ CHECK(secp256k1_surjectionproof_generate(vrfy, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 9);
+
+ CHECK(secp256k1_surjectionproof_generate(sign, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) != 0);
+ CHECK(ecount == 10);
+
+ CHECK(secp256k1_surjectionproof_generate(both, NULL, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, NULL, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs + 1, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs - 1, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, 0, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, NULL, 0, input_blinding_key[0], output_blinding_key) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 1, input_blinding_key[0], output_blinding_key) != 0);
+ CHECK(ecount == 13); /* the above line "succeeds" but generates an invalid proof as the input_index is wrong. it is fairly expensive to detect this. should we? */
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, n_inputs + 1, input_blinding_key[0], output_blinding_key) != 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, NULL, output_blinding_key) == 0);
+ CHECK(ecount == 14);
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], NULL) == 0);
+ CHECK(ecount == 15);
+
+ CHECK(secp256k1_surjectionproof_generate(both, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag, 0, input_blinding_key[0], output_blinding_key) != 0);
+ /* check verify */
+ CHECK(secp256k1_surjectionproof_verify(none, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 16);
+ CHECK(secp256k1_surjectionproof_verify(sign, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 17);
+ CHECK(secp256k1_surjectionproof_verify(vrfy, &proof, ephemeral_input_tags, n_inputs, &ephemeral_output_tag) != 0);
+ CHECK(ecount == 17);
+
+ CHECK(secp256k1_surjectionproof_verify(vrfy, NULL, ephemeral_input_tags, n_inputs, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 18);
+ CHECK(secp256k1_surjectionproof_verify(vrfy, &proof, NULL, n_inputs, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 19);
+ CHECK(secp256k1_surjectionproof_verify(vrfy, &proof, ephemeral_input_tags, n_inputs - 1, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 19);
+ CHECK(secp256k1_surjectionproof_verify(vrfy, &proof, ephemeral_input_tags, n_inputs + 1, &ephemeral_output_tag) == 0);
+ CHECK(ecount == 19);
+ CHECK(secp256k1_surjectionproof_verify(vrfy, &proof, ephemeral_input_tags, n_inputs, NULL) == 0);
+ CHECK(ecount == 20);
+
+ /* Check serialize */
+ serialized_len = sizeof(serialized_proof);
+ CHECK(secp256k1_surjectionproof_serialize(none, serialized_proof, &serialized_len, &proof) != 0);
+ CHECK(ecount == 20);
+ serialized_len = sizeof(serialized_proof);
+ CHECK(secp256k1_surjectionproof_serialize(none, NULL, &serialized_len, &proof) == 0);
+ CHECK(ecount == 21);
+ serialized_len = sizeof(serialized_proof);
+ CHECK(secp256k1_surjectionproof_serialize(none, serialized_proof, NULL, &proof) == 0);
+ CHECK(ecount == 22);
+ serialized_len = sizeof(serialized_proof);
+ CHECK(secp256k1_surjectionproof_serialize(none, serialized_proof, &serialized_len, NULL) == 0);
+ CHECK(ecount == 23);
+
+ serialized_len = sizeof(serialized_proof);
+ CHECK(secp256k1_surjectionproof_serialize(none, serialized_proof, &serialized_len, &proof) != 0);
+ /* Check parse */
+ CHECK(secp256k1_surjectionproof_parse(none, &proof, serialized_proof, serialized_len) != 0);
+ CHECK(ecount == 23);
+ CHECK(secp256k1_surjectionproof_parse(none, NULL, serialized_proof, serialized_len) == 0);
+ CHECK(ecount == 24);
+ CHECK(secp256k1_surjectionproof_parse(none, &proof, NULL, serialized_len) == 0);
+ CHECK(ecount == 25);
+ CHECK(secp256k1_surjectionproof_parse(none, &proof, serialized_proof, 0) == 0);
+ CHECK(ecount == 25);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+static void test_input_selection(size_t n_inputs) {
+ unsigned char seed[32];
+ size_t i;
+ size_t result;
+ size_t input_index;
+ size_t try_count = n_inputs * 100;
+ secp256k1_surjectionproof proof;
+ secp256k1_fixed_asset_tag fixed_input_tags[1000];
+ const size_t max_n_inputs = sizeof(fixed_input_tags) / sizeof(fixed_input_tags[0]) - 1;
+
+ CHECK(n_inputs < max_n_inputs);
+ secp256k1_rand256(seed);
+
+ for (i = 0; i < n_inputs + 1; i++) {
+ secp256k1_rand256(fixed_input_tags[i].data);
+ }
+
+ /* cannot match output when told to use zero keys */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, 0, &fixed_input_tags[0], try_count, seed);
+ CHECK(result == 0);
+ CHECK(secp256k1_surjectionproof_n_used_inputs(ctx, &proof) == 0);
+ CHECK(secp256k1_surjectionproof_n_total_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_serialized_size(ctx, &proof) == 34 + (n_inputs + 7) / 8);
+ if (n_inputs > 0) {
+ /* succeed in 100*n_inputs tries (probability of failure e^-100) */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, 1, &fixed_input_tags[0], try_count, seed);
+ CHECK(result > 0);
+ CHECK(result < n_inputs * 10);
+ CHECK(secp256k1_surjectionproof_n_used_inputs(ctx, &proof) == 1);
+ CHECK(secp256k1_surjectionproof_n_total_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_serialized_size(ctx, &proof) == 66 + (n_inputs + 7) / 8);
+ CHECK(input_index == 0);
+ }
+
+ if (n_inputs >= 3) {
+ /* succeed in 10*n_inputs tries (probability of failure e^-10) */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, 3, &fixed_input_tags[1], try_count, seed);
+ CHECK(result > 0);
+ CHECK(secp256k1_surjectionproof_n_used_inputs(ctx, &proof) == 3);
+ CHECK(secp256k1_surjectionproof_n_total_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_serialized_size(ctx, &proof) == 130 + (n_inputs + 7) / 8);
+ CHECK(input_index == 1);
+
+ /* fail, key not found */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, 3, &fixed_input_tags[n_inputs], try_count, seed);
+ CHECK(result == 0);
+
+ /* succeed on first try when told to use all keys */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, n_inputs, &fixed_input_tags[0], try_count, seed);
+ CHECK(result == 1);
+ CHECK(secp256k1_surjectionproof_n_used_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_n_total_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_serialized_size(ctx, &proof) == 2 + 32 * (n_inputs + 1) + (n_inputs + 7) / 8);
+ CHECK(input_index == 0);
+
+ /* succeed in less than 64 tries when told to use half keys. (probability of failure 2^-64) */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, n_inputs / 2, &fixed_input_tags[0], 64, seed);
+ CHECK(result > 0);
+ CHECK(result < 64);
+ CHECK(secp256k1_surjectionproof_n_used_inputs(ctx, &proof) == n_inputs / 2);
+ CHECK(secp256k1_surjectionproof_n_total_inputs(ctx, &proof) == n_inputs);
+ CHECK(secp256k1_surjectionproof_serialized_size(ctx, &proof) == 2 + 32 * (n_inputs / 2 + 1) + (n_inputs + 7) / 8);
+ CHECK(input_index == 0);
+ }
+}
+
+/** Runs surjectionproof_initialize multiple times and records the number of times each input was used.
+ */
+static void test_input_selection_distribution_helper(const secp256k1_fixed_asset_tag* fixed_input_tags, const size_t n_input_tags, const size_t n_input_tags_to_use, size_t *used_inputs) {
+ secp256k1_surjectionproof proof;
+ size_t input_index;
+ size_t i;
+ size_t j;
+ unsigned char seed[32];
+ size_t result;
+ for (i = 0; i < n_input_tags; i++) {
+ used_inputs[i] = 0;
+ }
+ for(j = 0; j < 10000; j++) {
+ secp256k1_rand256(seed);
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_input_tags, n_input_tags_to_use, &fixed_input_tags[0], 64, seed);
+ CHECK(result > 0);
+
+ for (i = 0; i < n_input_tags; i++) {
+ if (proof.used_inputs[i / 8] & (1 << (i % 8))) {
+ used_inputs[i] += 1;
+ }
+ }
+ }
+}
+
+/** Probabilistic test of the distribution of used_inputs after surjectionproof_initialize.
+ * Each confidence interval assertion fails incorrectly with a probability of 2^-128.
+ */
+static void test_input_selection_distribution(void) {
+ size_t i;
+ size_t n_input_tags_to_use;
+ const size_t n_inputs = 4;
+ secp256k1_fixed_asset_tag fixed_input_tags[4];
+ size_t used_inputs[4];
+
+ for (i = 0; i < n_inputs; i++) {
+ secp256k1_rand256(fixed_input_tags[i].data);
+ }
+
+ /* If there is one input tag to use, initialize must choose the one equal to fixed_output_tag. */
+ n_input_tags_to_use = 1;
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] == 10000);
+ CHECK(used_inputs[1] == 0);
+ CHECK(used_inputs[2] == 0);
+ CHECK(used_inputs[3] == 0);
+
+ n_input_tags_to_use = 2;
+ /* The input equal to the fixed_output_tag must be included in all used_inputs sets.
+ * For each fixed_input_tag != fixed_output_tag the probability that it's included
+ * in the used_inputs set is P(used_input|not fixed_output_tag) = 1/3.
+ */
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] == 10000);
+ CHECK(used_inputs[1] > 2725 && used_inputs[1] < 3961);
+ CHECK(used_inputs[2] > 2725 && used_inputs[2] < 3961);
+ CHECK(used_inputs[3] > 2725 && used_inputs[3] < 3961);
+
+ n_input_tags_to_use = 3;
+ /* P(used_input|not fixed_output_tag) = 2/3 */
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] == 10000);
+ CHECK(used_inputs[1] > 6039 && used_inputs[1] < 7275);
+ CHECK(used_inputs[2] > 6039 && used_inputs[2] < 7275);
+ CHECK(used_inputs[3] > 6039 && used_inputs[3] < 7275);
+
+
+ n_input_tags_to_use = 1;
+ /* Create second input tag that is equal to the output tag. Therefore, when using only
+ * one input we have P(used_input|fixed_output_tag) = 1/2 and P(used_input|not fixed_output_tag) = 0
+ */
+ memcpy(fixed_input_tags[0].data, fixed_input_tags[1].data, 32);
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] > 4345 && used_inputs[0] < 5655);
+ CHECK(used_inputs[1] > 4345 && used_inputs[1] < 5655);
+ CHECK(used_inputs[2] == 0);
+ CHECK(used_inputs[3] == 0);
+
+ n_input_tags_to_use = 2;
+ /* When choosing 2 inputs in initialization there are 5 possible combinations of
+ * input indexes {(0, 1), (1, 2), (0, 3), (1, 3), (0, 2)}. Therefore we have
+ * P(used_input|fixed_output_tag) = 3/5 and P(used_input|not fixed_output_tag) = 2/5.
+ */
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] > 5352 && used_inputs[0] < 6637);
+ CHECK(used_inputs[1] > 5352 && used_inputs[1] < 6637);
+ CHECK(used_inputs[2] > 3363 && used_inputs[2] < 4648);
+ CHECK(used_inputs[3] > 3363 && used_inputs[3] < 4648);
+
+ n_input_tags_to_use = 3;
+ /* There are 4 combinations, each with all inputs except one. Therefore we have
+ * P(used_input|fixed_output_tag) = 3/4 and P(used_input|not fixed_output_tag) = 3/4.
+ */
+ test_input_selection_distribution_helper(fixed_input_tags, n_inputs, n_input_tags_to_use, used_inputs);
+ CHECK(used_inputs[0] > 6918 && used_inputs[0] < 8053);
+ CHECK(used_inputs[1] > 6918 && used_inputs[1] < 8053);
+ CHECK(used_inputs[2] > 6918 && used_inputs[2] < 8053);
+ CHECK(used_inputs[3] > 6918 && used_inputs[3] < 8053);
+}
+
+static void test_gen_verify(size_t n_inputs, size_t n_used) {
+ unsigned char seed[32];
+ secp256k1_surjectionproof proof;
+ unsigned char serialized_proof[SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX];
+ unsigned char serialized_proof_trailing[SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX + 1];
+ size_t serialized_len = SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX;
+ secp256k1_fixed_asset_tag fixed_input_tags[1000];
+ secp256k1_generator ephemeral_input_tags[1000];
+ unsigned char *input_blinding_key[1000];
+ const size_t max_n_inputs = sizeof(fixed_input_tags) / sizeof(fixed_input_tags[0]) - 1;
+ size_t try_count = n_inputs * 100;
+ size_t key_index;
+ size_t input_index;
+ size_t i;
+ int result;
+
+ /* setup */
+ CHECK(n_used <= n_inputs);
+ CHECK(n_inputs < max_n_inputs);
+ secp256k1_rand256(seed);
+
+ key_index = (((size_t) seed[0] << 8) + seed[1]) % n_inputs;
+
+ for (i = 0; i < n_inputs + 1; i++) {
+ input_blinding_key[i] = malloc(32);
+ secp256k1_rand256(input_blinding_key[i]);
+ /* choose random fixed tag, except that for the output one copy from the key_index */
+ if (i < n_inputs) {
+ secp256k1_rand256(fixed_input_tags[i].data);
+ } else {
+ memcpy(&fixed_input_tags[i], &fixed_input_tags[key_index], sizeof(fixed_input_tags[i]));
+ }
+ CHECK(secp256k1_generator_generate_blinded(ctx, &ephemeral_input_tags[i], fixed_input_tags[i].data, input_blinding_key[i]));
+ }
+
+ /* test */
+ result = secp256k1_surjectionproof_initialize(ctx, &proof, &input_index, fixed_input_tags, n_inputs, n_used, &fixed_input_tags[key_index], try_count, seed);
+ if (n_used == 0) {
+ CHECK(result == 0);
+ return;
+ }
+ CHECK(result > 0);
+ CHECK(input_index == key_index);
+
+ result = secp256k1_surjectionproof_generate(ctx, &proof, ephemeral_input_tags, n_inputs, &ephemeral_input_tags[n_inputs], input_index, input_blinding_key[input_index], input_blinding_key[n_inputs]);
+ CHECK(result == 1);
+
+ CHECK(secp256k1_surjectionproof_serialize(ctx, serialized_proof, &serialized_len, &proof));
+ CHECK(serialized_len == secp256k1_surjectionproof_serialized_size(ctx, &proof));
+ CHECK(serialized_len == SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES(n_inputs, n_used));
+
+ /* trailing garbage */
+ memcpy(&serialized_proof_trailing, &serialized_proof, serialized_len);
+ serialized_proof_trailing[serialized_len] = seed[0];
+ CHECK(secp256k1_surjectionproof_parse(ctx, &proof, serialized_proof, serialized_len + 1) == 0);
+
+ CHECK(secp256k1_surjectionproof_parse(ctx, &proof, serialized_proof, serialized_len));
+ result = secp256k1_surjectionproof_verify(ctx, &proof, ephemeral_input_tags, n_inputs, &ephemeral_input_tags[n_inputs]);
+ CHECK(result == 1);
+ /* various fail cases */
+ if (n_inputs > 1) {
+ result = secp256k1_surjectionproof_verify(ctx, &proof, ephemeral_input_tags, n_inputs, &ephemeral_input_tags[n_inputs - 1]);
+ CHECK(result == 0);
+
+ /* number of entries in ephemeral_input_tags array is less than proof.n_inputs */
+ n_inputs -= 1;
+ result = secp256k1_surjectionproof_generate(ctx, &proof, ephemeral_input_tags, n_inputs, &ephemeral_input_tags[n_inputs], input_index, input_blinding_key[input_index], input_blinding_key[n_inputs]);
+ CHECK(result == 0);
+ result = secp256k1_surjectionproof_verify(ctx, &proof, ephemeral_input_tags, n_inputs, &ephemeral_input_tags[n_inputs - 1]);
+ CHECK(result == 0);
+ n_inputs += 1;
+ }
+
+ /* cleanup */
+ for (i = 0; i < n_inputs + 1; i++) {
+ free(input_blinding_key[i]);
+ }
+}
+
+/* check that a proof with empty n_used_inputs is invalid */
+static void test_no_used_inputs_verify(void) {
+ secp256k1_surjectionproof proof;
+ secp256k1_fixed_asset_tag fixed_input_tag;
+ secp256k1_fixed_asset_tag fixed_output_tag;
+ secp256k1_generator ephemeral_input_tags[1];
+ size_t n_ephemeral_input_tags = 1;
+ secp256k1_generator ephemeral_output_tag;
+ unsigned char blinding_key[32];
+ secp256k1_ge inputs[1];
+ secp256k1_ge output;
+ secp256k1_sha256 sha256_e0;
+ int result;
+
+ /* Create proof that doesn't use inputs. secp256k1_surjectionproof_initialize
+ * will not work here since it insists on selecting an input that matches the output. */
+ proof.n_inputs = 1;
+ memset(proof.used_inputs, 0, SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS / 8);
+
+ /* create different fixed input and output tags */
+ secp256k1_rand256(fixed_input_tag.data);
+ secp256k1_rand256(fixed_output_tag.data);
+
+ /* blind fixed output tags with random blinding key */
+ secp256k1_rand256(blinding_key);
+ CHECK(secp256k1_generator_generate_blinded(ctx, &ephemeral_input_tags[0], fixed_input_tag.data, blinding_key));
+ CHECK(secp256k1_generator_generate_blinded(ctx, &ephemeral_output_tag, fixed_output_tag.data, blinding_key));
+
+ /* create "borromean signature" which is just a hash of metadata (pubkeys, etc) in this case */
+ secp256k1_generator_load(&output, &ephemeral_output_tag);
+ secp256k1_generator_load(&inputs[0], &ephemeral_input_tags[0]);
+ secp256k1_surjection_genmessage(proof.data, inputs, 1, &output);
+ secp256k1_sha256_initialize(&sha256_e0);
+ secp256k1_sha256_write(&sha256_e0, proof.data, 32);
+ secp256k1_sha256_finalize(&sha256_e0, proof.data);
+
+ result = secp256k1_surjectionproof_verify(ctx, &proof, ephemeral_input_tags, n_ephemeral_input_tags, &ephemeral_output_tag);
+ CHECK(result == 0);
+}
+
+void test_bad_serialize(void) {
+ secp256k1_surjectionproof proof;
+ unsigned char serialized_proof[SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX];
+ size_t serialized_len;
+
+ proof.n_inputs = 0;
+ serialized_len = 2 + 31;
+ /* e0 is one byte too short */
+ CHECK(secp256k1_surjectionproof_serialize(ctx, serialized_proof, &serialized_len, &proof) == 0);
+}
+
+void test_bad_parse(void) {
+ secp256k1_surjectionproof proof;
+ unsigned char serialized_proof0[] = { 0x00 };
+ unsigned char serialized_proof1[] = { 0x01, 0x00 };
+ unsigned char serialized_proof2[33] = { 0 };
+
+ /* Missing total input count */
+ CHECK(secp256k1_surjectionproof_parse(ctx, &proof, serialized_proof0, sizeof(serialized_proof0)) == 0);
+ /* Missing bitmap */
+ CHECK(secp256k1_surjectionproof_parse(ctx, &proof, serialized_proof1, sizeof(serialized_proof1)) == 0);
+ /* Missing e0 value */
+ CHECK(secp256k1_surjectionproof_parse(ctx, &proof, serialized_proof2, sizeof(serialized_proof2)) == 0);
+}
+
+void run_surjection_tests(void) {
+ int i;
+ for (i = 0; i < count; i++) {
+ test_surjectionproof_api();
+ }
+
+ test_input_selection(0);
+ test_input_selection(1);
+ test_input_selection(5);
+ test_input_selection(100);
+ test_input_selection(SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS);
+
+ test_input_selection_distribution();
+ test_gen_verify(10, 3);
+ test_gen_verify(SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS, SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS);
+ test_no_used_inputs_verify();
+ test_bad_serialize();
+ test_bad_parse();
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/Makefile.am.include b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/Makefile.am.include
new file mode 100644
index 0000000..0dc5a64
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/Makefile.am.include
@@ -0,0 +1,10 @@
+include_HEADERS += include/secp256k1_whitelist.h
+noinst_HEADERS += src/modules/whitelist/whitelist_impl.h
+noinst_HEADERS += src/modules/whitelist/main_impl.h
+noinst_HEADERS += src/modules/whitelist/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_whitelist
+bench_whitelist_SOURCES = src/bench_whitelist.c
+bench_whitelist_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_whitelist_LDFLAGS = -static
+endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/main_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/main_impl.h
new file mode 100644
index 0000000..0b2d6c9
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/main_impl.h
@@ -0,0 +1,174 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_WHITELIST_MAIN
+#define SECP256K1_MODULE_WHITELIST_MAIN
+
+#include "include/secp256k1_whitelist.h"
+#include "modules/whitelist/whitelist_impl.h"
+
+#define MAX_KEYS SECP256K1_WHITELIST_MAX_N_KEYS /* shorter alias */
+
+int secp256k1_whitelist_sign(const secp256k1_context* ctx, secp256k1_whitelist_signature *sig, const secp256k1_pubkey *online_pubkeys, const secp256k1_pubkey *offline_pubkeys, const size_t n_keys, const secp256k1_pubkey *sub_pubkey, const unsigned char *online_seckey, const unsigned char *summed_seckey, const size_t index, secp256k1_nonce_function noncefp, const void *noncedata) {
+ secp256k1_gej pubs[MAX_KEYS];
+ secp256k1_scalar s[MAX_KEYS];
+ secp256k1_scalar sec, non;
+ unsigned char msg32[32];
+ int ret;
+
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ /* Sanity checks */
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(online_pubkeys != NULL);
+ ARG_CHECK(offline_pubkeys != NULL);
+ ARG_CHECK(n_keys <= MAX_KEYS);
+ ARG_CHECK(sub_pubkey != NULL);
+ ARG_CHECK(online_seckey != NULL);
+ ARG_CHECK(summed_seckey != NULL);
+ ARG_CHECK(index < n_keys);
+
+ /* Compute pubkeys: online_pubkey + tweaked(offline_pubkey + address), and message */
+ ret = secp256k1_whitelist_compute_keys_and_message(ctx, msg32, pubs, online_pubkeys, offline_pubkeys, n_keys, sub_pubkey);
+
+ /* Compute signing key: online_seckey + tweaked(summed_seckey) */
+ if (ret) {
+ ret = secp256k1_whitelist_compute_tweaked_privkey(ctx, &sec, online_seckey, summed_seckey);
+ }
+ /* Compute nonce and random s-values */
+ if (ret) {
+ unsigned char seckey32[32];
+ unsigned int count = 0;
+ int overflow = 0;
+
+ secp256k1_scalar_get_b32(seckey32, &sec);
+ while (1) {
+ size_t i;
+ unsigned char nonce32[32];
+ int done;
+ ret = noncefp(nonce32, msg32, seckey32, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ memset(nonce32, 0, 32);
+ if (overflow || secp256k1_scalar_is_zero(&non)) {
+ count++;
+ continue;
+ }
+ done = 1;
+ for (i = 0; i < n_keys; i++) {
+ msg32[0] ^= i + 1;
+ msg32[1] ^= (i + 1) / 0x100;
+ ret = noncefp(&sig->data[32 * (i + 1)], msg32, seckey32, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&s[i], &sig->data[32 * (i + 1)], &overflow);
+ msg32[0] ^= i + 1;
+ msg32[1] ^= (i + 1) / 0x100;
+ if (overflow || secp256k1_scalar_is_zero(&s[i])) {
+ count++;
+ done = 0;
+ break;
+ }
+ }
+ if (done) {
+ break;
+ }
+ }
+ memset(seckey32, 0, 32);
+ }
+ /* Actually sign */
+ if (ret) {
+ sig->n_keys = n_keys;
+ ret = secp256k1_borromean_sign(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, &sig->data[0], s, pubs, &non, &sec, &n_keys, &index, 1, msg32, 32);
+ /* Signing will change s[index], so update in the sig structure */
+ secp256k1_scalar_get_b32(&sig->data[32 * (index + 1)], &s[index]);
+ }
+
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+int secp256k1_whitelist_verify(const secp256k1_context* ctx, const secp256k1_whitelist_signature *sig, const secp256k1_pubkey *online_pubkeys, const secp256k1_pubkey *offline_pubkeys, const size_t n_keys, const secp256k1_pubkey *sub_pubkey) {
+ secp256k1_scalar s[MAX_KEYS];
+ secp256k1_gej pubs[MAX_KEYS];
+ unsigned char msg32[32];
+ size_t i;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(online_pubkeys != NULL);
+ ARG_CHECK(offline_pubkeys != NULL);
+ ARG_CHECK(sub_pubkey != NULL);
+
+ if (sig->n_keys > MAX_KEYS || sig->n_keys != n_keys) {
+ return 0;
+ }
+ for (i = 0; i < sig->n_keys; i++) {
+ int overflow = 0;
+ secp256k1_scalar_set_b32(&s[i], &sig->data[32 * (i + 1)], &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&s[i])) {
+ return 0;
+ }
+ }
+
+ /* Compute pubkeys: online_pubkey + tweaked(offline_pubkey + address), and message */
+ if (!secp256k1_whitelist_compute_keys_and_message(ctx, msg32, pubs, online_pubkeys, offline_pubkeys, sig->n_keys, sub_pubkey)) {
+ return 0;
+ }
+ /* Do verification */
+ return secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, &sig->data[0], s, pubs, &sig->n_keys, 1, msg32, 32);
+}
+
+size_t secp256k1_whitelist_signature_n_keys(const secp256k1_whitelist_signature *sig) {
+ return sig->n_keys;
+}
+
+int secp256k1_whitelist_signature_parse(const secp256k1_context* ctx, secp256k1_whitelist_signature *sig, const unsigned char *input, size_t input_len) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input != NULL);
+
+ if (input_len == 0) {
+ return 0;
+ }
+
+ sig->n_keys = input[0];
+ if (sig->n_keys >= MAX_KEYS || input_len != 1 + 32 * (sig->n_keys + 1)) {
+ return 0;
+ }
+ memcpy(&sig->data[0], &input[1], 32 * (sig->n_keys + 1));
+
+ return 1;
+}
+
+int secp256k1_whitelist_signature_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *output_len, const secp256k1_whitelist_signature *sig) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(output_len != NULL);
+ ARG_CHECK(sig != NULL);
+
+ if (*output_len < 1 + 32 * (sig->n_keys + 1)) {
+ return 0;
+ }
+
+ output[0] = sig->n_keys;
+ memcpy(&output[1], &sig->data[0], 32 * (sig->n_keys + 1));
+ *output_len = 1 + 32 * (sig->n_keys + 1);
+
+ return 1;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/tests_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/tests_impl.h
new file mode 100644
index 0000000..7cf1fb0
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/tests_impl.h
@@ -0,0 +1,151 @@
+/**********************************************************************
+ * Copyright (c) 2014-2016 Pieter Wuille, Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_WHITELIST_TESTS
+#define SECP256K1_MODULE_WHITELIST_TESTS
+
+#include "include/secp256k1_whitelist.h"
+
+void test_whitelist_end_to_end(const size_t n_keys) {
+ unsigned char **online_seckey = (unsigned char **) malloc(n_keys * sizeof(*online_seckey));
+ unsigned char **summed_seckey = (unsigned char **) malloc(n_keys * sizeof(*summed_seckey));
+ secp256k1_pubkey *online_pubkeys = (secp256k1_pubkey *) malloc(n_keys * sizeof(*online_pubkeys));
+ secp256k1_pubkey *offline_pubkeys = (secp256k1_pubkey *) malloc(n_keys * sizeof(*offline_pubkeys));
+
+ secp256k1_scalar ssub;
+ unsigned char csub[32];
+ secp256k1_pubkey sub_pubkey;
+
+ /* Generate random keys */
+ size_t i;
+ /* Start with subkey */
+ random_scalar_order_test(&ssub);
+ secp256k1_scalar_get_b32(csub, &ssub);
+ CHECK(secp256k1_ec_seckey_verify(ctx, csub) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &sub_pubkey, csub) == 1);
+ /* Then offline and online whitelist keys */
+ for (i = 0; i < n_keys; i++) {
+ secp256k1_scalar son, soff;
+
+ online_seckey[i] = (unsigned char *) malloc(32);
+ summed_seckey[i] = (unsigned char *) malloc(32);
+
+ /* Create two keys */
+ random_scalar_order_test(&son);
+ secp256k1_scalar_get_b32(online_seckey[i], &son);
+ CHECK(secp256k1_ec_seckey_verify(ctx, online_seckey[i]) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &online_pubkeys[i], online_seckey[i]) == 1);
+
+ random_scalar_order_test(&soff);
+ secp256k1_scalar_get_b32(summed_seckey[i], &soff);
+ CHECK(secp256k1_ec_seckey_verify(ctx, summed_seckey[i]) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &offline_pubkeys[i], summed_seckey[i]) == 1);
+
+ /* Make summed_seckey correspond to the sum of offline_pubkey and sub_pubkey */
+ secp256k1_scalar_add(&soff, &soff, &ssub);
+ secp256k1_scalar_get_b32(summed_seckey[i], &soff);
+ CHECK(secp256k1_ec_seckey_verify(ctx, summed_seckey[i]) == 1);
+ }
+
+ /* Sign/verify with each one */
+ for (i = 0; i < n_keys; i++) {
+ unsigned char serialized[32 + 4 + 32 * SECP256K1_WHITELIST_MAX_N_KEYS] = {0};
+ size_t slen = sizeof(serialized);
+ secp256k1_whitelist_signature sig;
+ secp256k1_whitelist_signature sig1;
+
+ CHECK(secp256k1_whitelist_sign(ctx, &sig, online_pubkeys, offline_pubkeys, n_keys, &sub_pubkey, online_seckey[i], summed_seckey[i], i, NULL, NULL));
+ CHECK(secp256k1_whitelist_verify(ctx, &sig, online_pubkeys, offline_pubkeys, n_keys, &sub_pubkey) == 1);
+ /* Check that exchanging keys causes a failure */
+ CHECK(secp256k1_whitelist_verify(ctx, &sig, offline_pubkeys, online_pubkeys, n_keys, &sub_pubkey) != 1);
+ /* Serialization round trip */
+ CHECK(secp256k1_whitelist_signature_serialize(ctx, serialized, &slen, &sig) == 1);
+ CHECK(slen == 33 + 32 * n_keys);
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig1, serialized, slen) == 1);
+ /* (Check various bad-length conditions) */
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig1, serialized, slen + 32) == 0);
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig1, serialized, slen + 1) == 0);
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig1, serialized, slen - 1) == 0);
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig1, serialized, 0) == 0);
+ CHECK(secp256k1_whitelist_verify(ctx, &sig1, online_pubkeys, offline_pubkeys, n_keys, &sub_pubkey) == 1);
+ CHECK(secp256k1_whitelist_verify(ctx, &sig1, offline_pubkeys, online_pubkeys, n_keys, &sub_pubkey) != 1);
+
+ /* Test n_keys */
+ CHECK(secp256k1_whitelist_signature_n_keys(&sig) == n_keys);
+ CHECK(secp256k1_whitelist_signature_n_keys(&sig1) == n_keys);
+
+ /* Test bad number of keys in signature */
+ sig.n_keys = n_keys + 1;
+ CHECK(secp256k1_whitelist_verify(ctx, &sig, offline_pubkeys, online_pubkeys, n_keys, &sub_pubkey) != 1);
+ sig.n_keys = n_keys;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ free(online_seckey[i]);
+ free(summed_seckey[i]);
+ }
+ free(online_seckey);
+ free(summed_seckey);
+ free(online_pubkeys);
+ free(offline_pubkeys);
+}
+
+void test_whitelist_bad_parse(void) {
+ secp256k1_whitelist_signature sig;
+
+ const unsigned char serialized0[] = { 1+32*(0+1) };
+ const unsigned char serialized1[] = {
+ 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06
+ };
+ const unsigned char serialized2[] = {
+ 0x01,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ };
+
+ /* Empty input */
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig, serialized0, 0) == 0);
+ /* Misses one byte of e0 */
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig, serialized1, sizeof(serialized1)) == 0);
+ /* Enough bytes for e0, but there is no s value */
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig, serialized2, sizeof(serialized2)) == 0);
+}
+
+void test_whitelist_bad_serialize(void) {
+ unsigned char serialized[] = {
+ 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ };
+ size_t serialized_len;
+ secp256k1_whitelist_signature sig;
+
+ CHECK(secp256k1_whitelist_signature_parse(ctx, &sig, serialized, sizeof(serialized)) == 1);
+ serialized_len = sizeof(serialized) - 1;
+ /* Output buffer is one byte too short */
+ CHECK(secp256k1_whitelist_signature_serialize(ctx, serialized, &serialized_len, &sig) == 0);
+}
+
+void run_whitelist_tests(void) {
+ int i;
+ test_whitelist_bad_parse();
+ test_whitelist_bad_serialize();
+ for (i = 0; i < count; i++) {
+ test_whitelist_end_to_end(1);
+ test_whitelist_end_to_end(10);
+ test_whitelist_end_to_end(50);
+ }
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist.md b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist.md
new file mode 100644
index 0000000..15ab998
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist.md
@@ -0,0 +1,96 @@
+Address Whitelisting Module
+===========================
+
+This module implements a scheme by which members of some group, having fixed
+signing keys, can prove control of an arbitrary other key without associating
+their own identity (only that they belong to the group) to the new key. The
+application is to patch ring-signature-like behaviour onto systems such as
+Bitcoin or PGP which do not directly support this.
+
+We refer to such delegation as "whitelisting" because we expect it to be used
+to build a dynamic whitelist of authorized keys.
+
+For example, imagine a private sidechain with a fixed membership set but
+stronger privacy properties than Bitcoin. When moving coins from this system
+to Bitcoin, it is desirable that the destination Bitcoin addresses be provably
+in control of some user of the sidechain. This prevents malicious or erroneous
+behaviour on the sidechain, which can likely be resolved by its participants,
+from translating to theft on the wider Bitcoin network, which is irreversible.
+
+### Unused Schemes and Design Rationale
+
+#### Direct Signing
+
+An obvious scheme for such delegation is to simply have participants sign the
+key they want to whitelist. To avoid revealing their specific identity, they
+could use a ring signature. The problem with this is that it really only proves
+that a participant *signed off* on a key, not that they control it. Thus any
+security failure that allows text substitution could be used to subvert this
+and redirect coins to an attacker-controlled address.
+
+#### Signing with Difference-of-Keys
+
+A less obvious scheme is to have a participant sign an arbitrary message with
+the sum of her key `P` and the whitelisted key `W`. Such a signature with the key
+`P + W` proves knowledge of either (a) discrete logarithms of both `P` and `W`;
+or (b) neither. This makes directly attacking participants' signing schemes much
+harder, but allows an attacker to whitelist arbitrary "garbage" keys by computing
+`W` as the difference between an attacker-controlled key and `P`. For Bitcoin,
+the effect of garbage keys is to "burn" stolen coins, destroying them.
+
+In an important sense, this "burning coins" attack is a good thing: it enables
+*offline delegation*. That is, the key `P` does not need to be available at the
+time of delegation. Instead, participants could choose `S = P + W`, sign with
+this to delegate, and only later compute the discrete logarithm of `W = P - S`.
+This allows `P` to be in cold storage or be otherwise inaccessible, improving
+the overall system security.
+
+#### Signing with Tweaked-Difference-of-Keys
+
+A modification of this scheme, which prevents this "garbage key" attack, is to
+instead have participants sign some message with the key `P + H(W)W`, for `H`
+some random-oracle hash that maps group elements to scalars. This key, and its
+discrete logarithm, cannot be known until after `W` is chosen, so `W` cannot
+be selected as the difference between it and `P`. (Note that `P` could still
+be some chosen difference; however `P` is a fixed key and must be verified
+out-of-band to have come from a legitimate participant anyway.)
+
+This scheme is almost what we want, but it no longer supports offline
+delegation. However, we can get this back by introducing a new key, `P'`,
+and signing with the key `P + H(W + P')(W + P')`. This gives us the best
+of both worlds: `P'` does not need to be online to delegate, allowing it
+to be securely stored and preventing real-time attacks; `P` does need to
+be online, but its compromise only allows an attacker to whitelist "garbage
+keys", not attacker-controlled ones.
+
+### Our Scheme
+
+Our scheme works as follows: each participant `i` chooses two keys, `P_i` and `Q_i`.
+We refer to `P_i` as the "online key" and `Q_i` as the "offline key". To whitelist
+a key `W`, the participant computes the key `L_j = P_j + H(W + Q_j)(W + Q_j)` for
+every participant `j`. Then she will know the discrete logarithm of `L_i` for her
+own `i`.
+
+Next, she signs a message containing every `P_i` and `Q_i` as well as `W` with
+a ring signature over all the keys `L_j`. This proves that she knows the discrete
+logarithm of some `L_i` (though it is zero-knowledge which one), and therefore
+knows:
+1. The discrete logarithms of all of `W`, `P_i` and `Q_i`; or
+2. The discrete logarithm of `P_i` but of *neither* `W` nor `Q_i`.
+In other words, compromise of the online key `P_i` allows an attacker to whitelist
+"garbage keys" for which nobody knows the discrete logarithm; to whitelist an
+attacker-controlled key, he must compromise both `P_i` and `Q_i`. This is difficult
+because by design, only the sum `S = W + Q_i` is used when signing; then by choosing
+`S` freely, a participant can delegate without the secret key to `Q_i` ever being online.
+(Later, when she wants to actually use `W`, she will need to compute its key as the
+difference between `S` and `Q_i`; but this can be done offline and much later
+and with more expensive security requirements.)
+
+The message to be signed contains all public keys to prevent a class of attacks
+centered around choosing keys to match pre-computed signatures. In our proposed
+use case, whitelisted keys already must be computed before they are signed, and
+the remaining public keys are verified out-of-band when setting up the system,
+so there is no direct benefit to this. We do it only to reduce fragility and
+increase safety of unforeseen uses.
+
+
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist_impl.h
new file mode 100644
index 0000000..ff8d87f
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/modules/whitelist/whitelist_impl.h
@@ -0,0 +1,129 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_WHITELIST_IMPL_H_
+#define _SECP256K1_WHITELIST_IMPL_H_
+
+static int secp256k1_whitelist_hash_pubkey(secp256k1_scalar* output, secp256k1_gej* pubkey) {
+ unsigned char h[32];
+ unsigned char c[33];
+ secp256k1_sha256 sha;
+ int overflow = 0;
+ size_t size = 33;
+ secp256k1_ge ge;
+
+ secp256k1_ge_set_gej(&ge, pubkey);
+
+ secp256k1_sha256_initialize(&sha);
+ if (!secp256k1_eckey_pubkey_serialize(&ge, c, &size, SECP256K1_EC_COMPRESSED)) {
+ return 0;
+ }
+ secp256k1_sha256_write(&sha, c, size);
+ secp256k1_sha256_finalize(&sha, h);
+
+ secp256k1_scalar_set_b32(output, h, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(output)) {
+ /* This return path is mathematically impossible to hit */
+ secp256k1_scalar_clear(output);
+ return 0;
+ }
+ return 1;
+}
+
+static int secp256k1_whitelist_tweak_pubkey(const secp256k1_context* ctx, secp256k1_gej* pub_tweaked) {
+ secp256k1_scalar tweak;
+ secp256k1_scalar zero;
+ int ret;
+
+ secp256k1_scalar_set_int(&zero, 0);
+
+ ret = secp256k1_whitelist_hash_pubkey(&tweak, pub_tweaked);
+ if (ret) {
+ secp256k1_ecmult(&ctx->ecmult_ctx, pub_tweaked, pub_tweaked, &tweak, &zero);
+ }
+ return ret;
+}
+
+static int secp256k1_whitelist_compute_tweaked_privkey(const secp256k1_context* ctx, secp256k1_scalar* skey, const unsigned char *online_key, const unsigned char *summed_key) {
+ secp256k1_scalar tweak;
+ int ret = 1;
+ int overflow = 0;
+
+ secp256k1_scalar_set_b32(skey, summed_key, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(skey)) {
+ ret = 0;
+ }
+ if (ret) {
+ secp256k1_gej pkeyj;
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pkeyj, skey);
+ ret = secp256k1_whitelist_hash_pubkey(&tweak, &pkeyj);
+ }
+ if (ret) {
+ secp256k1_scalar sonline;
+ secp256k1_scalar_mul(skey, skey, &tweak);
+
+ secp256k1_scalar_set_b32(&sonline, online_key, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&sonline)) {
+ ret = 0;
+ }
+ secp256k1_scalar_add(skey, skey, &sonline);
+ secp256k1_scalar_clear(&sonline);
+ secp256k1_scalar_clear(&tweak);
+ }
+
+ if (!ret) {
+ secp256k1_scalar_clear(skey);
+ }
+ return ret;
+}
+
+/* Takes a list of pubkeys and combines them to form the public keys needed
+ * for the ring signature; also produce a commitment to every one that will
+ * be our "message". */
+static int secp256k1_whitelist_compute_keys_and_message(const secp256k1_context* ctx, unsigned char *msg32, secp256k1_gej *keys, const secp256k1_pubkey *online_pubkeys, const secp256k1_pubkey *offline_pubkeys, const int n_keys, const secp256k1_pubkey *sub_pubkey) {
+ unsigned char c[33];
+ size_t size = 33;
+ secp256k1_sha256 sha;
+ int i;
+ secp256k1_ge subkey_ge;
+
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_pubkey_load(ctx, &subkey_ge, sub_pubkey);
+
+ /* commit to sub-key */
+ if (!secp256k1_eckey_pubkey_serialize(&subkey_ge, c, &size, SECP256K1_EC_COMPRESSED)) {
+ return 0;
+ }
+ secp256k1_sha256_write(&sha, c, size);
+ for (i = 0; i < n_keys; i++) {
+ secp256k1_ge offline_ge;
+ secp256k1_ge online_ge;
+ secp256k1_gej tweaked_gej;
+
+ /* commit to fixed keys */
+ secp256k1_pubkey_load(ctx, &offline_ge, &offline_pubkeys[i]);
+ if (!secp256k1_eckey_pubkey_serialize(&offline_ge, c, &size, SECP256K1_EC_COMPRESSED)) {
+ return 0;
+ }
+ secp256k1_sha256_write(&sha, c, size);
+ secp256k1_pubkey_load(ctx, &online_ge, &online_pubkeys[i]);
+ if (!secp256k1_eckey_pubkey_serialize(&online_ge, c, &size, SECP256K1_EC_COMPRESSED)) {
+ return 0;
+ }
+ secp256k1_sha256_write(&sha, c, size);
+
+ /* compute tweaked keys */
+ secp256k1_gej_set_ge(&tweaked_gej, &offline_ge);
+ secp256k1_gej_add_ge_var(&tweaked_gej, &tweaked_gej, &subkey_ge, NULL);
+ secp256k1_whitelist_tweak_pubkey(ctx, &tweaked_gej);
+ secp256k1_gej_add_ge_var(&keys[i], &tweaked_gej, &online_ge, NULL);
+ }
+ secp256k1_sha256_finalize(&sha, msg32);
+ return 1;
+}
+
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num.h
new file mode 100644
index 0000000..49f2dd7
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num.h
@@ -0,0 +1,74 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_NUM_H
+#define SECP256K1_NUM_H
+
+#ifndef USE_NUM_NONE
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp.h"
+#else
+#error "Please select num implementation"
+#endif
+
+/** Copy a number. */
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a);
+
+/** Convert a number's absolute value to a binary big-endian string.
+ * There must be enough place. */
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a);
+
+/** Set a number to the value of a binary big-endian string. */
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen);
+
+/** Compute a modular inverse. The input must be less than the modulus. */
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m);
+
+/** Compute the jacobi symbol (a|b). b must be positive and odd. */
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Compare the absolute value of two numbers. */
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Test whether two numbers are equal (including sign). */
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Add two (signed) numbers. */
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Subtract two (signed) numbers. */
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Multiply two (signed) numbers. */
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
+ even if r was negative. */
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
+
+/** Right-shift the passed number by bits bits. */
+static void secp256k1_num_shift(secp256k1_num *r, int bits);
+
+/** Check whether a number is zero. */
+static int secp256k1_num_is_zero(const secp256k1_num *a);
+
+/** Check whether a number is one. */
+static int secp256k1_num_is_one(const secp256k1_num *a);
+
+/** Check whether a number is strictly negative. */
+static int secp256k1_num_is_neg(const secp256k1_num *a);
+
+/** Change a number's sign. */
+static void secp256k1_num_negate(secp256k1_num *r);
+
+#endif
+
+#endif /* SECP256K1_NUM_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp.h
new file mode 100644
index 0000000..3619844
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp.h
@@ -0,0 +1,20 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_NUM_REPR_H
+#define SECP256K1_NUM_REPR_H
+
+#include <gmp.h>
+
+#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS)
+
+typedef struct {
+ mp_limb_t data[2*NUM_LIMBS];
+ int neg;
+ int limbs;
+} secp256k1_num;
+
+#endif /* SECP256K1_NUM_REPR_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
new file mode 100644
index 0000000..0ae2a8b
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
@@ -0,0 +1,288 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_NUM_REPR_IMPL_H
+#define SECP256K1_NUM_REPR_IMPL_H
+
+#include <string.h>
+#include <stdlib.h>
+#include <gmp.h>
+
+#include "util.h"
+#include "num.h"
+
+#ifdef VERIFY
+static void secp256k1_num_sanity(const secp256k1_num *a) {
+ VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0));
+}
+#else
+#define secp256k1_num_sanity(a) do { } while(0)
+#endif
+
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) {
+ *r = *a;
+}
+
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a) {
+ unsigned char tmp[65];
+ int len = 0;
+ int shift = 0;
+ if (a->limbs>1 || a->data[0] != 0) {
+ len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs);
+ }
+ while (shift < len && tmp[shift] == 0) shift++;
+ VERIFY_CHECK(len-shift <= (int)rlen);
+ memset(r, 0, rlen - len + shift);
+ if (len > shift) {
+ memcpy(r + rlen - len + shift, tmp + shift, len - shift);
+ }
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) {
+ int len;
+ VERIFY_CHECK(alen > 0);
+ VERIFY_CHECK(alen <= 64);
+ len = mpn_set_str(r->data, a, alen, 256);
+ if (len == 0) {
+ r->data[0] = 0;
+ len = 1;
+ }
+ VERIFY_CHECK(len <= NUM_LIMBS*2);
+ r->limbs = len;
+ r->neg = 0;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs);
+ r->limbs = a->limbs;
+ if (c != 0) {
+ VERIFY_CHECK(r->limbs < 2*NUM_LIMBS);
+ r->data[r->limbs++] = c;
+ }
+}
+
+static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
+ (void)c;
+ VERIFY_CHECK(c == 0);
+ r->limbs = a->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) {
+ secp256k1_num_sanity(r);
+ secp256k1_num_sanity(m);
+
+ if (r->limbs >= m->limbs) {
+ mp_limb_t t[2*NUM_LIMBS];
+ mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs);
+ memset(t, 0, sizeof(t));
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ }
+
+ if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
+ secp256k1_num_sub_abs(r, m, r);
+ r->neg = 0;
+ }
+}
+
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) {
+ int i;
+ mp_limb_t g[NUM_LIMBS+1];
+ mp_limb_t u[NUM_LIMBS+1];
+ mp_limb_t v[NUM_LIMBS+1];
+ mp_size_t sn;
+ mp_size_t gn;
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(m);
+
+ /** mpn_gcdext computes: (G,S) = gcdext(U,V), where
+ * * G = gcd(U,V)
+ * * G = U*S + V*T
+ * * U has equal or more limbs than V, and V has no padding
+ * If we set U to be (a padded version of) a, and V = m:
+ * G = a*S + m*T
+ * G = a*S mod m
+ * Assuming G=1:
+ * S = 1/a mod m
+ */
+ VERIFY_CHECK(m->limbs <= NUM_LIMBS);
+ VERIFY_CHECK(m->data[m->limbs-1] != 0);
+ for (i = 0; i < m->limbs; i++) {
+ u[i] = (i < a->limbs) ? a->data[i] : 0;
+ v[i] = m->data[i];
+ }
+ sn = NUM_LIMBS+1;
+ gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs);
+ (void)gn;
+ VERIFY_CHECK(gn == 1);
+ VERIFY_CHECK(g[0] == 1);
+ r->neg = a->neg ^ m->neg;
+ if (sn < 0) {
+ mpn_sub(r->data, m->data, m->limbs, r->data, -sn);
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ } else {
+ r->limbs = sn;
+ }
+ memset(g, 0, sizeof(g));
+ memset(u, 0, sizeof(u));
+ memset(v, 0, sizeof(v));
+}
+
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) {
+ int ret;
+ mpz_t ga, gb;
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1));
+
+ mpz_inits(ga, gb, NULL);
+
+ mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data);
+ mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data);
+ if (a->neg) {
+ mpz_neg(ga, ga);
+ }
+
+ ret = mpz_jacobi(ga, gb);
+
+ mpz_clears(ga, gb, NULL);
+
+ return ret;
+}
+
+static int secp256k1_num_is_one(const secp256k1_num *a) {
+ return (a->limbs == 1 && a->data[0] == 1);
+}
+
+static int secp256k1_num_is_zero(const secp256k1_num *a) {
+ return (a->limbs == 1 && a->data[0] == 0);
+}
+
+static int secp256k1_num_is_neg(const secp256k1_num *a) {
+ return (a->limbs > 1 || a->data[0] != 0) && a->neg;
+}
+
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 1;
+ }
+ if (a->limbs < b->limbs) {
+ return -1;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs);
+}
+
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 0;
+ }
+ if (a->limbs < b->limbs) {
+ return 0;
+ }
+ if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) {
+ return 0;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs) == 0;
+}
+
+static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) {
+ if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */
+ r->neg = a->neg;
+ if (a->limbs >= b->limbs) {
+ secp256k1_num_add_abs(r, a, b);
+ } else {
+ secp256k1_num_add_abs(r, b, a);
+ }
+ } else {
+ if (secp256k1_num_cmp(a, b) > 0) {
+ r->neg = a->neg;
+ secp256k1_num_sub_abs(r, a, b);
+ } else {
+ r->neg = b->neg ^ bneg;
+ secp256k1_num_sub_abs(r, b, a);
+ }
+ }
+}
+
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 0);
+}
+
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 1);
+}
+
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t tmp[2*NUM_LIMBS+1];
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+
+ VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1);
+ if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) {
+ r->limbs = 1;
+ r->neg = 0;
+ r->data[0] = 0;
+ return;
+ }
+ if (a->limbs >= b->limbs) {
+ mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs);
+ } else {
+ mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs);
+ }
+ r->limbs = a->limbs + b->limbs;
+ if (r->limbs > 1 && tmp[r->limbs - 1]==0) {
+ r->limbs--;
+ }
+ VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS);
+ mpn_copyi(r->data, tmp, r->limbs);
+ r->neg = a->neg ^ b->neg;
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_shift(secp256k1_num *r, int bits) {
+ if (bits % GMP_NUMB_BITS) {
+ /* Shift within limbs. */
+ mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
+ }
+ if (bits >= GMP_NUMB_BITS) {
+ int i;
+ /* Shift full limbs. */
+ for (i = 0; i < r->limbs; i++) {
+ int index = i + (bits / GMP_NUMB_BITS);
+ if (index < r->limbs && index < 2*NUM_LIMBS) {
+ r->data[i] = r->data[index];
+ } else {
+ r->data[i] = 0;
+ }
+ }
+ }
+ while (r->limbs>1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_negate(secp256k1_num *r) {
+ r->neg ^= 1;
+}
+
+#endif /* SECP256K1_NUM_REPR_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_impl.h
new file mode 100644
index 0000000..c45193b
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/num_impl.h
@@ -0,0 +1,24 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_NUM_IMPL_H
+#define SECP256K1_NUM_IMPL_H
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include "num.h"
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp_impl.h"
+#elif defined(USE_NUM_NONE)
+/* Nothing. */
+#else
+#error "Please select num implementation"
+#endif
+
+#endif /* SECP256K1_NUM_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar.h
new file mode 100644
index 0000000..57389da
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar.h
@@ -0,0 +1,112 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_H
+#define SECP256K1_SCALAR_H
+
+#include "num.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low.h"
+#elif defined(USE_SCALAR_4X64)
+#include "scalar_4x64.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+/** Clear a scalar to prevent the leak of sensitive data. */
+static void secp256k1_scalar_clear(secp256k1_scalar *r);
+
+/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
+static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Access bits from a scalar. Not constant time. */
+static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Set a scalar from a big endian byte array. */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow);
+
+/** Set a scalar to an unsigned integer. */
+static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v);
+
+/** Set a scalar to an unsigned 64-bit integer */
+static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v);
+
+/** Convert a scalar to a byte array. */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a);
+
+/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag);
+
+/** Multiply two scalars (modulo the group order). */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Shift a scalar right by some amount strictly between 0 and 16, returning
+ * the low bits that were shifted off */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
+
+/** Compute the square of a scalar (modulo the group order). */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order). */
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the complement of a scalar (modulo the group order). */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Check whether a scalar equals zero. */
+static int secp256k1_scalar_is_zero(const secp256k1_scalar *a);
+
+/** Check whether a scalar equals one. */
+static int secp256k1_scalar_is_one(const secp256k1_scalar *a);
+
+/** Check whether a scalar, considered as an nonnegative integer, is even. */
+static int secp256k1_scalar_is_even(const secp256k1_scalar *a);
+
+/** Check whether a scalar is higher than the group order divided by 2. */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a);
+
+/** Conditionally negate a number, in constant time.
+ * Returns -1 if the number was negated, 1 otherwise */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag);
+
+#ifndef USE_NUM_NONE
+/** Convert a scalar to a number. */
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a);
+
+/** Get the order of the group as a number. */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r);
+#endif
+
+/** Compare two scalars. */
+static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+#ifdef USE_ENDOMORPHISM
+/** Find r1 and r2 such that r1+r2*2^128 = a. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+#endif
+
+/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
+static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
+
+/** Generate two scalars from a 32-byte seed and an integer using the chacha20 stream cipher */
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx);
+
+#endif /* SECP256K1_SCALAR_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
new file mode 100644
index 0000000..19c7495
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_H
+#define SECP256K1_SCALAR_REPR_H
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint64_t d[4];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}}
+
+#endif /* SECP256K1_SCALAR_REPR_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
new file mode 100644
index 0000000..2673caa
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
@@ -0,0 +1,1049 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_IMPL_H
+#define SECP256K1_SCALAR_REPR_IMPL_H
+
+#include "scalar.h"
+#include <stdint.h>
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
+#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
+#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
+#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
+#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
+#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
+
+/** Zero all four limbs of r (used to avoid leaving sensitive scalar data in memory). */
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+/** Set r to the small unsigned integer v; the three upper limbs are zeroed. */
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+/** Set r to the 64-bit value v (fills the lowest limb); the three upper limbs are zeroed. */
+SECP256K1_INLINE static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+/** Return 'count' bits of a starting at bit 'offset'. All requested bits must lie
+ * within a single 64-bit limb (enforced by the VERIFY_CHECK). */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+ return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
+}
+
+/** Return 'count' (< 32) bits of a starting at bit 'offset', allowing the range to
+ * straddle two adjacent limbs. Branches on offset/count, so not constant time. */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 6 == offset >> 6) {
+ /* All bits are in one limb: take the fast single-limb path. */
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ /* Bits span a limb boundary: combine the tail of one limb with the head of the next. */
+ VERIFY_CHECK((offset >> 6) + 1 < 4);
+ return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
+ }
+}
+
+/** Return 1 if a, read as a 256-bit integer, is >= the group order n; 0 otherwise.
+ * Branch-free: limb comparisons are accumulated from most to least significant. */
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
+ no |= (a->d[2] < SECP256K1_N_2);
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1);
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
+
+/** If overflow is 1, add 2^256 - n to r (i.e. subtract the order n, mod 2^256);
+ * if overflow is 0 this is a no-op. Returns the overflow argument. */
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
+ uint128_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint64_t)r->d[3]; /* t is uint128_t, so this addition cannot lose a carry */
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+ return overflow;
+}
+
+/** Compute r = (a + b) mod n with a 64x4 carry chain. Returns 1 if the raw 256-bit
+ * sum carried out or equalled/exceeded the order n (in which case n was subtracted). */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint128_t t = (uint128_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ /* t now holds the carry out of bit 256; the carry and the >= n case cannot both occur. */
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+/** Conditionally add 2^bit to r: a no-op when flag is 0. When flag is 0 the bit index
+ * is pushed out of range so every limb contribution below evaluates to 0, keeping the
+ * operation free of flag-dependent branches. The result must not overflow n. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint128_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
+ t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 64) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+/** Parse a 32-byte big-endian array into a scalar, reducing it modulo the group order.
+ * If 'overflow' is non-NULL it receives 1 when the raw input was >= the order. */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
+ r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
+ r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
+ r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+/** Serialize a scalar to 32 bytes, big endian (limb d[3] first, most significant byte first). */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
+ bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
+ bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
+ bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+/** Return 1 if a == 0, computed branch-free as the OR of all four limbs. */
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+/** Compute r = (n - a) mod n, branch-free. For a == 0 the 'nonzero' mask is all zeros,
+ * forcing r to 0; otherwise r is computed as the two's-complement ~a + (n + 1). */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
+ uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero;
+}
+
+/** Return 1 if a == 1, branch-free: XOR the low limb with 1 and OR in the upper limbs. */
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+/** Return 1 if a is strictly greater than n/2 (limbs SECP256K1_N_H_*), branch-free,
+ * accumulating limb comparisons from most to least significant. */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_H_3);
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
+
+/** Conditionally negate r modulo n, in constant time: when flag is nonzero, r becomes
+ * (n - r) mod n (0 stays 0 via the 'nonzero' mask). Returns -1 if negated, 1 otherwise. */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If we are flag = 0, mask = 00...00 and this is a no-op;
+ * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
+ uint64_t mask = !flag - 1;
+ uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
+ uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint64_t tl, th, th2, tl2; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Reduce a 512-bit number l (8 little-endian 64-bit limbs) modulo the group order n,
+ * writing the 256-bit result to r. Uses hand-written x86_64 inline asm when available;
+ * otherwise a portable three-stage reduction (512->385, 385->258, 258->256 bits) built
+ * on the muladd/sumadd/extract accumulator macros, followed by a final conditional
+ * subtraction of n via secp256k1_scalar_reduce. */
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
+#ifdef USE_ASM_X86_64
+ /* Reduce 512 bits into 385. */
+ uint64_t m0, m1, m2, m3, m4, m5, m6;
+ uint64_t p0, p1, p2, p3, p4;
+ uint64_t c;
+
+ __asm__ __volatile__(
+ /* Preload. */
+ "movq 32(%%rsi), %%r11\n"
+ "movq 40(%%rsi), %%r12\n"
+ "movq 48(%%rsi), %%r13\n"
+ "movq 56(%%rsi), %%r14\n"
+ /* Initialize r8,r9,r10 */
+ "movq 0(%%rsi), %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9) += n0 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract m0 */
+ "movq %%r8, %q0\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10) += l1 */
+ "addq 8(%%rsi), %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += n1 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n0 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m1 */
+ "movq %%r9, %q1\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += l2 */
+ "addq 16(%%rsi), %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n2 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n1 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n0 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract m2 */
+ "movq %%r10, %q2\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += l3 */
+ "addq 24(%%rsi), %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n3 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n2 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n1 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* extract m3 */
+ "movq %%r8, %q3\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += n3 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n2 */
+ "addq %%r13, %%r9\n"
+ "adcq $0, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m4 */
+ "movq %%r9, %q4\n"
+ /* (r10,r8) += n3 */
+ "addq %%r14, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m5 */
+ "movq %%r10, %q5\n"
+ /* extract m6 */
+ "movq %%r8, %q6\n"
+ : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
+ : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
+
+ /* Reduce 385 bits into 258. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q9, %%r11\n"
+ "movq %q10, %%r12\n"
+ "movq %q11, %%r13\n"
+ /* Initialize (r8,r9,r10) */
+ "movq %q5, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9) += m4 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract p0 */
+ "movq %%r8, %q0\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10) += m1 */
+ "addq %q6, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += m5 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += m4 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract p1 */
+ "movq %%r9, %q1\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += m2 */
+ "addq %q7, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m6 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m5 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m4 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p2 */
+ "movq %%r10, %q2\n"
+ /* (r8,r9) += m3 */
+ "addq %q8, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += m6 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* (r8,r9) += m5 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p3 */
+ "movq %%r8, %q3\n"
+ /* (r9) += m6 */
+ "addq %%r13, %%r9\n"
+ /* extract p4 */
+ "movq %%r9, %q4\n"
+ : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
+ : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
+
+ /* Reduce 258 bits into 256. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q5, %%r10\n"
+ /* (rax,rdx) = p4 * c0 */
+ "movq %7, %%rax\n"
+ "mulq %%r10\n"
+ /* (rax,rdx) += p0 */
+ "addq %q1, %%rax\n"
+ "adcq $0, %%rdx\n"
+ /* extract r0 */
+ "movq %%rax, 0(%q6)\n"
+ /* Move to (r8,r9) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ /* (r8,r9) += p1 */
+ "addq %q2, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += p4 * c1 */
+ "movq %8, %%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* Extract r1 */
+ "movq %%r8, 8(%q6)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r8) += p4 */
+ "addq %%r10, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r8) += p2 */
+ "addq %q3, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* Extract r2 */
+ "movq %%r9, 16(%q6)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r8,r9) += p3 */
+ "addq %q4, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract r3 */
+ "movq %%r8, 24(%q6)\n"
+ /* Extract c */
+ "movq %%r9, %q0\n"
+ : "=g"(c)
+ : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
+#else
+ /* Portable path: the same three reduction stages using the 160-bit (c0,c1,c2) accumulator macros. */
+ uint128_t c;
+ uint64_t c0, c1, c2;
+ uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
+ uint64_t m0, m1, m2, m3, m4, m5;
+ uint32_t m6;
+ uint64_t p0, p1, p2, p3;
+ uint32_t p4;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ sumadd(n0);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ sumadd(n1);
+ extract(m3);
+ muladd(n3, SECP256K1_N_C_1);
+ sumadd(n2);
+ extract(m4);
+ sumadd_fast(n3);
+ extract_fast(m5);
+ VERIFY_CHECK(c0 <= 1);
+ m6 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m4, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m5, SECP256K1_N_C_0);
+ muladd(m4, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m6, SECP256K1_N_C_0);
+ muladd(m5, SECP256K1_N_C_1);
+ sumadd(m4);
+ extract(p2);
+ sumadd_fast(m3);
+ muladd_fast(m6, SECP256K1_N_C_1);
+ sumadd_fast(m5);
+ extract_fast(p3);
+ p4 = c0 + m6;
+ VERIFY_CHECK(p4 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
+ c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
+ r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
+ r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p2 + (uint128_t)p4;
+ r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p3;
+ r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+#endif
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
+
+/** Compute the full 512-bit product l[0..7] = a * b of two 256-bit scalars (schoolbook,
+ * column by column; no modular reduction). Uses x86_64 inline asm when available,
+ * otherwise the portable muladd/extract accumulator macros. */
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
+#ifdef USE_ASM_X86_64
+ const uint64_t *pb = b->d;
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r15\n"
+ "movq 8(%%rdi), %%rbx\n"
+ "movq 16(%%rdi), %%rcx\n"
+ "movq 0(%%rdx), %%r11\n"
+ "movq 8(%%rdx), %%r12\n"
+ "movq 16(%%rdx), %%r13\n"
+ "movq 24(%%rdx), %%r14\n"
+ /* (rax,rdx) = a0 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a0 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a1 * b0 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a0 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * b1 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a2 * b0 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += a0 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Preload a3 */
+ "movq 24(%%rdi), %%r15\n"
+ /* (r10,r8,r9) += a1 * b2 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a2 * b1 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a3 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a1 * b3 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * b2 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a3 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a2 * b3 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a3 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ : "+d"(pb)
+ : "S"(l), "D"(a->d)
+ : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3] * b[0..3]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ extract(l[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ extract(l[5]);
+ muladd_fast(a->d[3], b->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+/** Compute l[0..7] = a^2: the full 512-bit square of a 256-bit (4x64) scalar.
+ *  Uses hand-written x86_64 assembly when USE_ASM_X86_64 is defined; otherwise
+ *  a portable schoolbook squaring with a 160-bit accumulator (c0,c1,c2). */
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
+#ifdef USE_ASM_X86_64
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r11\n"
+ "movq 8(%%rdi), %%r12\n"
+ "movq 16(%%rdi), %%r13\n"
+ "movq 24(%%rdi), %%r14\n"
+ /* (rax,rdx) = a0 * a0 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx,0) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a0 * a1 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a0 * a2 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * a1 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += 2 * a0 * a3 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += 2 * a1 * a2 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a1 * a3 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * a2 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a2 * a3 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * a3 */
+ "movq %%r14, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ :
+ : "S"(l), "D"(a->d)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3]^2 (off-diagonal terms doubled via muladd2). */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd_fast(a->d[3], a->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+/** r = (a * b) mod n: full 512-bit product followed by reduction modulo the
+ *  group order. */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+/** Shift r right by n bits (1 <= n <= 15) and return the n bits shifted out
+ *  (the low n bits of the original value). Note: this is a plain 256-bit
+ *  shift; no modular reduction is involved. */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
+ r->d[3] = (r->d[3] >> n);
+ return ret;
+}
+
+/** r = (a * a) mod n: dedicated squaring followed by reduction. */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t l[8];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+#ifdef USE_ENDOMORPHISM
+/** Split a into r1 (the low 128 bits) and r2 (the high 128 bits). No
+ *  reduction is performed; both outputs are below 2^128 by construction. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = 0;
+ r1->d[3] = 0;
+ r2->d[0] = a->d[2];
+ r2->d[1] = a->d[3];
+ r2->d[2] = 0;
+ r2->d[3] = 0;
+}
+#endif
+
+/** Return 1 if a == b, 0 otherwise. Branch-free: ORs the XOR of all limbs so
+ *  the comparison takes the same time regardless of where the values differ. */
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
+}
+
+/** r = round((a * b) >> shift), for shift >= 256. The 512-bit product is
+ *  shifted right; the final cadd_bit adds the bit just below the cut so the
+ *  result is rounded to nearest rather than truncated. Not constant time. */
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint64_t l[8];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 6;
+ shiftlow = shift & 0x3F;
+ shifthigh = 64 - shiftlow;
+ /* Each limb guards against reading past l[7]; the shiftlow != 0 test avoids
+  * an undefined 64-bit shift by 64 when the shift is limb-aligned. */
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+}
+
+#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
+#define QUARTERROUND(a,b,c,d) \
+ a += b; d = ROTL32(d ^ a, 16); \
+ c += d; b = ROTL32(b ^ c, 12); \
+ a += b; d = ROTL32(d ^ a, 8); \
+ c += d; b = ROTL32(b ^ c, 7);
+
+#ifdef WORDS_BIGENDIAN
+#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define BE32(p) (p)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define LE32(p) (p)
+#endif
+
+/** Derive two scalars r1, r2 from a 32-byte seed and a 64-bit index using the
+ *  ChaCha20 block function. The 512-bit keystream block is split into two
+ *  256-bit values; if either is >= the group order, the block is recomputed
+ *  with an incremented counter word (x15 = over_count) until both are valid,
+ *  so the outputs are uniformly distributed modulo n. */
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
+ size_t n;
+ size_t over_count = 0;
+ uint32_t seed32[8];
+ uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+ int over1, over2;
+
+ /* seed must point to at least 32 readable bytes. */
+ memcpy((void *) seed32, (const void *) seed, 32);
+ do {
+  /* "expand 32-byte k" constants. */
+  x0 = 0x61707865;
+  x1 = 0x3320646e;
+  x2 = 0x79622d32;
+  x3 = 0x6b206574;
+  x4 = LE32(seed32[0]);
+  x5 = LE32(seed32[1]);
+  x6 = LE32(seed32[2]);
+  x7 = LE32(seed32[3]);
+  x8 = LE32(seed32[4]);
+  x9 = LE32(seed32[5]);
+  x10 = LE32(seed32[6]);
+  x11 = LE32(seed32[7]);
+  x12 = idx;       /* low word of the index as the counter */
+  x13 = idx >> 32; /* high word of the index */
+  x14 = 0;
+  x15 = over_count; /* retry counter: changes the block on each overflow */
+
+ n = 10; /* 10 double rounds = ChaCha20. */
+ while (n--) {
+ QUARTERROUND(x0, x4, x8,x12)
+ QUARTERROUND(x1, x5, x9,x13)
+ QUARTERROUND(x2, x6,x10,x14)
+ QUARTERROUND(x3, x7,x11,x15)
+ QUARTERROUND(x0, x5,x10,x15)
+ QUARTERROUND(x1, x6,x11,x12)
+ QUARTERROUND(x2, x7, x8,x13)
+ QUARTERROUND(x3, x4, x9,x14)
+ }
+
+ /* Feed-forward: add the initial state back in, per the ChaCha20 spec. */
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
+ x4 += LE32(seed32[0]);
+ x5 += LE32(seed32[1]);
+ x6 += LE32(seed32[2]);
+ x7 += LE32(seed32[3]);
+ x8 += LE32(seed32[4]);
+ x9 += LE32(seed32[5]);
+ x10 += LE32(seed32[6]);
+ x11 += LE32(seed32[7]);
+ x12 += idx;
+ x13 += idx >> 32;
+ x14 += 0;
+ x15 += over_count;
+
+ /* Interpret the keystream as two big-endian 256-bit numbers. BE32 on the
+  * uint64_t-promoted word only touches the low 32 bits (its masks select
+  * them), so the cast before the shift is safe. */
+ r1->d[3] = BE32((uint64_t) x0) << 32 | BE32(x1);
+ r1->d[2] = BE32((uint64_t) x2) << 32 | BE32(x3);
+ r1->d[1] = BE32((uint64_t) x4) << 32 | BE32(x5);
+ r1->d[0] = BE32((uint64_t) x6) << 32 | BE32(x7);
+ r2->d[3] = BE32((uint64_t) x8) << 32 | BE32(x9);
+ r2->d[2] = BE32((uint64_t) x10) << 32 | BE32(x11);
+ r2->d[1] = BE32((uint64_t) x12) << 32 | BE32(x13);
+ r2->d[0] = BE32((uint64_t) x14) << 32 | BE32(x15);
+
+ over1 = secp256k1_scalar_check_overflow(r1);
+ over2 = secp256k1_scalar_check_overflow(r2);
+ over_count++;
+ } while (over1 | over2);
+}
+
+#undef ROTL32
+#undef QUARTERROUND
+#undef BE32
+#undef LE32
+
+#endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
new file mode 100644
index 0000000..2c9a348
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_H
+#define SECP256K1_SCALAR_REPR_H
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint32_t d[8];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}}
+
+#endif /* SECP256K1_SCALAR_REPR_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
new file mode 100644
index 0000000..9cf5c54
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
@@ -0,0 +1,832 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_IMPL_H
+#define SECP256K1_SCALAR_REPR_IMPL_H
+
+#include <stdint.h>
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
+#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
+#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
+#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
+#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
+#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (~SECP256K1_N_2)
+#define SECP256K1_N_C_3 (~SECP256K1_N_3)
+#define SECP256K1_N_C_4 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
+#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
+#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
+#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
+#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
+
+/** Set all eight 32-bit limbs of r to zero. */
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+/** Set r to the small integer v (stored entirely in the lowest limb). */
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+/** Set r to the 64-bit value v (split across the two lowest 32-bit limbs). */
+SECP256K1_INLINE static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v) {
+ r->d[0] = v;
+ r->d[1] = v >> 32;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+/** Extract `count` bits of a starting at bit `offset`. The VERIFY_CHECK
+ *  requires the whole bit range to fall within a single 32-bit limb; callers
+ *  must also keep count < 32 or the (1 << count) mask is undefined. */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
+ return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
+}
+
+/** Extract `count` bits of a starting at bit `offset`, allowing the range to
+ *  straddle a limb boundary. Not constant time (branches on the offset). */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 5 == offset >> 5) {
+ /* Entire range lies within one limb: delegate to the fast path. */
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ /* Stitch together the high bits of one limb and low bits of the next. */
+ VERIFY_CHECK((offset >> 5) + 1 < 8);
+ return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
+ }
+}
+
+/** Return 1 if a >= n (the group order), 0 otherwise. Branch-free limb-wise
+ *  comparison from the most significant limb down: `no` latches once a limb
+ *  is strictly below n's, `yes` once one is strictly above. */
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */
+ no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_4);
+ yes |= (a->d[4] > SECP256K1_N_4) & ~no;
+ no |= (a->d[3] < SECP256K1_N_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
+
+/** If overflow is 1, subtract n from r (implemented as adding 2^256 - n, i.e.
+ *  the N_C constants, with the carry out of bit 256 discarded); if overflow is
+ *  0 this is the identity. Returns the overflow flag unchanged. */
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
+ uint64_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;
+ r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;
+ r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[5];
+ r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[6];
+ r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[7];
+ r->d[7] = t & 0xFFFFFFFFUL;
+ return overflow;
+}
+
+/** r = (a + b) mod n. Returns 1 if the raw sum overflowed 2^256 or exceeded
+ *  the group order (i.e. a reduction took place), 0 otherwise. */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint64_t t = (uint64_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[4] + b->d[4];
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[5] + b->d[5];
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[6] + b->d[6];
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[7] + b->d[7];
+ r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
+ /* t is the carry out of bit 256; at most one reduction is ever needed. */
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+/** Conditionally add 2^bit to r when flag is nonzero, in constant time.
+ *  Caller guarantees the result does not exceed the group order. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint64_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
+ t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
+ r->d[7] = t & 0xFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 32) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+/** Parse a 32-byte big-endian number into r, reducing it modulo n. If
+ *  overflow is non-NULL, *overflow is set to 1 when the input was >= n. */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
+ r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
+ r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
+ r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
+ r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
+ r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
+ r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
+ r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+/** Serialize a as a 32-byte big-endian number into bin. */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
+ bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
+ bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
+ bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
+ bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
+ bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
+ bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
+ bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+/** Return 1 if a == 0, 0 otherwise (branch-free OR of all limbs). */
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+/** r = -a mod n, in constant time. Computes n - a as (~a + n + 1) limb-wise;
+ *  the `nonzero` mask forces the result to 0 when a == 0 (since n - 0 = n
+ *  would otherwise not be a reduced scalar). */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
+ uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
+ r->d[7] = t & nonzero;
+}
+
+/** Return 1 if a == 1, 0 otherwise (branch-free). */
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+/** Return 1 if a > n/2 (a "high" scalar), 0 otherwise. Same branch-free
+ *  limb-wise comparison scheme as check_overflow, against the N_H constants
+ *  (the limbs of half the group order). */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_H_7);
+ yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
+ no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */
+ no |= (a->d[3] < SECP256K1_N_H_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_H_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
+
+/** Conditionally negate r in place (constant time). Returns -1 if r was
+ *  negated, 1 if it was left unchanged. */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If we are flag = 0, mask = 00...00 and this is a no-op;
+  * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
+ uint32_t mask = !flag - 1;
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
+ uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask);
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask);
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask);
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
+ r->d[7] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
+
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint32_t tl, th, th2, tl2; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Reduce a 512-bit number l[0..15] modulo the group order n, writing the
+ *  result to r. Works in three passes, each folding the high part back in via
+ *  2^256 = N_C (mod n): 512 -> 385 bits, 385 -> 258 bits, 258 -> 256 bits,
+ *  followed by a final conditional subtraction of n. */
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) {
+ uint64_t c;
+ uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
+ uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
+ uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8;
+
+ /* 96 bit accumulator. */
+ uint32_t c0, c1, c2;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ muladd(n0, SECP256K1_N_C_2);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ muladd(n1, SECP256K1_N_C_2);
+ muladd(n0, SECP256K1_N_C_3);
+ extract(m3);
+ sumadd(l[4]);
+ muladd(n4, SECP256K1_N_C_0);
+ muladd(n3, SECP256K1_N_C_1);
+ muladd(n2, SECP256K1_N_C_2);
+ muladd(n1, SECP256K1_N_C_3);
+ sumadd(n0);
+ extract(m4);
+ sumadd(l[5]);
+ muladd(n5, SECP256K1_N_C_0);
+ muladd(n4, SECP256K1_N_C_1);
+ muladd(n3, SECP256K1_N_C_2);
+ muladd(n2, SECP256K1_N_C_3);
+ sumadd(n1);
+ extract(m5);
+ sumadd(l[6]);
+ muladd(n6, SECP256K1_N_C_0);
+ muladd(n5, SECP256K1_N_C_1);
+ muladd(n4, SECP256K1_N_C_2);
+ muladd(n3, SECP256K1_N_C_3);
+ sumadd(n2);
+ extract(m6);
+ sumadd(l[7]);
+ muladd(n7, SECP256K1_N_C_0);
+ muladd(n6, SECP256K1_N_C_1);
+ muladd(n5, SECP256K1_N_C_2);
+ muladd(n4, SECP256K1_N_C_3);
+ sumadd(n3);
+ extract(m7);
+ muladd(n7, SECP256K1_N_C_1);
+ muladd(n6, SECP256K1_N_C_2);
+ muladd(n5, SECP256K1_N_C_3);
+ sumadd(n4);
+ extract(m8);
+ muladd(n7, SECP256K1_N_C_2);
+ muladd(n6, SECP256K1_N_C_3);
+ sumadd(n5);
+ extract(m9);
+ muladd(n7, SECP256K1_N_C_3);
+ sumadd(n6);
+ extract(m10);
+ sumadd_fast(n7);
+ extract_fast(m11);
+ VERIFY_CHECK(c0 <= 1);
+ m12 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m8, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m9, SECP256K1_N_C_0);
+ muladd(m8, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m10, SECP256K1_N_C_0);
+ muladd(m9, SECP256K1_N_C_1);
+ muladd(m8, SECP256K1_N_C_2);
+ extract(p2);
+ sumadd(m3);
+ muladd(m11, SECP256K1_N_C_0);
+ muladd(m10, SECP256K1_N_C_1);
+ muladd(m9, SECP256K1_N_C_2);
+ muladd(m8, SECP256K1_N_C_3);
+ extract(p3);
+ sumadd(m4);
+ muladd(m12, SECP256K1_N_C_0);
+ muladd(m11, SECP256K1_N_C_1);
+ muladd(m10, SECP256K1_N_C_2);
+ muladd(m9, SECP256K1_N_C_3);
+ sumadd(m8);
+ extract(p4);
+ sumadd(m5);
+ muladd(m12, SECP256K1_N_C_1);
+ muladd(m11, SECP256K1_N_C_2);
+ muladd(m10, SECP256K1_N_C_3);
+ sumadd(m9);
+ extract(p5);
+ sumadd(m6);
+ muladd(m12, SECP256K1_N_C_2);
+ muladd(m11, SECP256K1_N_C_3);
+ sumadd(m10);
+ extract(p6);
+ sumadd_fast(m7);
+ muladd_fast(m12, SECP256K1_N_C_3);
+ sumadd_fast(m11);
+ extract_fast(p7);
+ p8 = c0 + m12;
+ VERIFY_CHECK(p8 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
+ c = p0 + (uint64_t)SECP256K1_N_C_0 * p8;
+ r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p1 + (uint64_t)SECP256K1_N_C_1 * p8;
+ r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p2 + (uint64_t)SECP256K1_N_C_2 * p8;
+ r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p3 + (uint64_t)SECP256K1_N_C_3 * p8;
+ r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p4 + (uint64_t)p8;
+ r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p5;
+ r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p6;
+ r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p7;
+ r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
+
+/** Compute l[0..15] = a * b: the full 512-bit product of two 256-bit (8x32)
+ *  scalars, by schoolbook multiplication along anti-diagonals with a 96-bit
+ *  accumulator (c0,c1,c2). */
+static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ /* 96 bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7] * b[0..7]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[0], b->d[4]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ muladd(a->d[4], b->d[0]);
+ extract(l[4]);
+ muladd(a->d[0], b->d[5]);
+ muladd(a->d[1], b->d[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ muladd(a->d[4], b->d[1]);
+ muladd(a->d[5], b->d[0]);
+ extract(l[5]);
+ muladd(a->d[0], b->d[6]);
+ muladd(a->d[1], b->d[5]);
+ muladd(a->d[2], b->d[4]);
+ muladd(a->d[3], b->d[3]);
+ muladd(a->d[4], b->d[2]);
+ muladd(a->d[5], b->d[1]);
+ muladd(a->d[6], b->d[0]);
+ extract(l[6]);
+ muladd(a->d[0], b->d[7]);
+ muladd(a->d[1], b->d[6]);
+ muladd(a->d[2], b->d[5]);
+ muladd(a->d[3], b->d[4]);
+ muladd(a->d[4], b->d[3]);
+ muladd(a->d[5], b->d[2]);
+ muladd(a->d[6], b->d[1]);
+ muladd(a->d[7], b->d[0]);
+ extract(l[7]);
+ muladd(a->d[1], b->d[7]);
+ muladd(a->d[2], b->d[6]);
+ muladd(a->d[3], b->d[5]);
+ muladd(a->d[4], b->d[4]);
+ muladd(a->d[5], b->d[3]);
+ muladd(a->d[6], b->d[2]);
+ muladd(a->d[7], b->d[1]);
+ extract(l[8]);
+ muladd(a->d[2], b->d[7]);
+ muladd(a->d[3], b->d[6]);
+ muladd(a->d[4], b->d[5]);
+ muladd(a->d[5], b->d[4]);
+ muladd(a->d[6], b->d[3]);
+ muladd(a->d[7], b->d[2]);
+ extract(l[9]);
+ muladd(a->d[3], b->d[7]);
+ muladd(a->d[4], b->d[6]);
+ muladd(a->d[5], b->d[5]);
+ muladd(a->d[6], b->d[4]);
+ muladd(a->d[7], b->d[3]);
+ extract(l[10]);
+ muladd(a->d[4], b->d[7]);
+ muladd(a->d[5], b->d[6]);
+ muladd(a->d[6], b->d[5]);
+ muladd(a->d[7], b->d[4]);
+ extract(l[11]);
+ muladd(a->d[5], b->d[7]);
+ muladd(a->d[6], b->d[6]);
+ muladd(a->d[7], b->d[5]);
+ extract(l[12]);
+ muladd(a->d[6], b->d[7]);
+ muladd(a->d[7], b->d[6]);
+ extract(l[13]);
+ muladd_fast(a->d[7], b->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
+
+/** Compute l[0..15] = a^2: the full 512-bit square of a 256-bit (8x32)
+ *  scalar. Off-diagonal cross terms are doubled via muladd2; diagonal terms
+ *  use muladd. */
+static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) {
+ /* 96 bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7]^2. */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[0], a->d[4]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[0], a->d[5]);
+ muladd2(a->d[1], a->d[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd2(a->d[0], a->d[6]);
+ muladd2(a->d[1], a->d[5]);
+ muladd2(a->d[2], a->d[4]);
+ muladd(a->d[3], a->d[3]);
+ extract(l[6]);
+ muladd2(a->d[0], a->d[7]);
+ muladd2(a->d[1], a->d[6]);
+ muladd2(a->d[2], a->d[5]);
+ muladd2(a->d[3], a->d[4]);
+ extract(l[7]);
+ muladd2(a->d[1], a->d[7]);
+ muladd2(a->d[2], a->d[6]);
+ muladd2(a->d[3], a->d[5]);
+ muladd(a->d[4], a->d[4]);
+ extract(l[8]);
+ muladd2(a->d[2], a->d[7]);
+ muladd2(a->d[3], a->d[6]);
+ muladd2(a->d[4], a->d[5]);
+ extract(l[9]);
+ muladd2(a->d[3], a->d[7]);
+ muladd2(a->d[4], a->d[6]);
+ muladd(a->d[5], a->d[5]);
+ extract(l[10]);
+ muladd2(a->d[4], a->d[7]);
+ muladd2(a->d[5], a->d[6]);
+ extract(l[11]);
+ muladd2(a->d[5], a->d[7]);
+ muladd(a->d[6], a->d[6]);
+ extract(l[12]);
+ muladd2(a->d[6], a->d[7]);
+ extract(l[13]);
+ muladd_fast(a->d[7], a->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+/** r = (a * b) mod n: full 512-bit product followed by reduction modulo the
+ *  group order. */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint32_t l[16];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+/** Shift r right by n bits (1 <= n <= 15) and return the n bits shifted out
+ *  (the low n bits of the original value). Plain shift, no reduction. */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
+ r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
+ r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
+ r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
+ r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
+ r->d[7] = (r->d[7] >> n);
+ return ret;
+}
+
+/** r = (a * a) mod n: dedicated squaring followed by reduction. */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t l[16];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+#ifdef USE_ENDOMORPHISM
+/** Split a into its low 128 bits (r1) and high 128 bits (r2). The write order
+ * (all of r1, then all of r2) matches the unrolled form of this function. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ int i;
+ for (i = 0; i < 4; i++) {
+ r1->d[i] = a->d[i];
+ }
+ for (i = 4; i < 8; i++) {
+ r1->d[i] = 0;
+ }
+ for (i = 0; i < 4; i++) {
+ r2->d[i] = a->d[i + 4];
+ }
+ for (i = 4; i < 8; i++) {
+ r2->d[i] = 0;
+ }
+}
+#endif
+
+/** Compare two scalars for equality in constant time (no early exit: the full
+ * XOR of every limb pair is accumulated before the comparison). */
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint32_t diff = 0;
+ int i;
+ for (i = 0; i < 8; i++) {
+ diff |= a->d[i] ^ b->d[i];
+ }
+ return diff == 0;
+}
+
+/** r = round((a * b) / 2^shift), for shift >= 256. The 512-bit product is
+ * computed into l[16]; each output limb then selects one or two 32-bit words
+ * of l based on the shift amount. */
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint32_t l[16];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 5;
+ shiftlow = shift & 0x1F;
+ shifthigh = 32 - shiftlow;
+ /* Each limb merges the tops of l[i + shiftlimbs] with the bottoms of
+ * l[i + 1 + shiftlimbs]. The "shiftlow ?" guards avoid the illegal shift
+ * by shifthigh == 32 when shiftlow == 0, and the "shift < N" guards keep
+ * the l[] indices in range. */
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
+ /* Round to nearest: add back the most significant bit that was shifted out. */
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
+}
+
+#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
+#define QUARTERROUND(a,b,c,d) \
+ a += b; d = ROTL32(d ^ a, 16); \
+ c += d; b = ROTL32(b ^ c, 12); \
+ a += b; d = ROTL32(d ^ a, 8); \
+ c += d; b = ROTL32(b ^ c, 7);
+
+#ifdef WORDS_BIGENDIAN
+#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define BE32(p) (p)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define LE32(p) (p)
+#endif
+
+/** Derive two scalars r1, r2 from a 32-byte seed and a 64-bit index using the
+ * ChaCha20 block function (20 rounds; the constants and 16/12/8/7 quarter-round
+ * rotations match RFC 8439). The seed is the key, idx fills two counter words,
+ * and word 15 carries a retry counter so that a fresh keystream block is
+ * produced whenever an output is not below the group order. */
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
+ size_t n;
+ size_t over_count = 0;
+ uint32_t seed32[8];
+ uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+ int over1, over2;
+
+ /* Copy the seed bytes into aligned 32-bit words; they are interpreted
+ * little-endian below via LE32. */
+ memcpy((void *) seed32, (const void *) seed, 32);
+ do {
+ /* "expand 32-byte k" constants. */
+ x0 = 0x61707865;
+ x1 = 0x3320646e;
+ x2 = 0x79622d32;
+ x3 = 0x6b206574;
+ x4 = LE32(seed32[0]);
+ x5 = LE32(seed32[1]);
+ x6 = LE32(seed32[2]);
+ x7 = LE32(seed32[3]);
+ x8 = LE32(seed32[4]);
+ x9 = LE32(seed32[5]);
+ x10 = LE32(seed32[6]);
+ x11 = LE32(seed32[7]);
+ /* Low and high halves of the 64-bit block index. */
+ x12 = idx;
+ x13 = idx >> 32;
+ x14 = 0;
+ /* Retry counter: changes the keystream on each overflow retry. */
+ x15 = over_count;
+
+ /* 20 rounds = 10 double rounds (a column round plus a diagonal round). */
+ n = 10;
+ while (n--) {
+ QUARTERROUND(x0, x4, x8,x12)
+ QUARTERROUND(x1, x5, x9,x13)
+ QUARTERROUND(x2, x6,x10,x14)
+ QUARTERROUND(x3, x7,x11,x15)
+ QUARTERROUND(x0, x5,x10,x15)
+ QUARTERROUND(x1, x6,x11,x12)
+ QUARTERROUND(x2, x7, x8,x13)
+ QUARTERROUND(x3, x4, x9,x14)
+ }
+
+ /* Feed-forward: add the initial state back into the permuted state. */
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
+ x4 += LE32(seed32[0]);
+ x5 += LE32(seed32[1]);
+ x6 += LE32(seed32[2]);
+ x7 += LE32(seed32[3]);
+ x8 += LE32(seed32[4]);
+ x9 += LE32(seed32[5]);
+ x10 += LE32(seed32[6]);
+ x11 += LE32(seed32[7]);
+ x12 += idx;
+ x13 += idx >> 32;
+ x14 += 0;
+ x15 += over_count;
+
+ /* Store the two 256-bit outputs most-significant-limb-first, byte-swapping
+ * each word on little-endian hosts (BE32). */
+ r1->d[7] = BE32(x0);
+ r1->d[6] = BE32(x1);
+ r1->d[5] = BE32(x2);
+ r1->d[4] = BE32(x3);
+ r1->d[3] = BE32(x4);
+ r1->d[2] = BE32(x5);
+ r1->d[1] = BE32(x6);
+ r1->d[0] = BE32(x7);
+ r2->d[7] = BE32(x8);
+ r2->d[6] = BE32(x9);
+ r2->d[5] = BE32(x10);
+ r2->d[4] = BE32(x11);
+ r2->d[3] = BE32(x12);
+ r2->d[2] = BE32(x13);
+ r2->d[1] = BE32(x14);
+ r2->d[0] = BE32(x15);
+
+ /* Reject-and-retry until both outputs are below the group order. */
+ over1 = secp256k1_scalar_check_overflow(r1);
+ over2 = secp256k1_scalar_check_overflow(r2);
+ over_count++;
+ } while (over1 | over2);
+}
+
+#undef ROTL32
+#undef QUARTERROUND
+#undef BE32
+#undef LE32
+
+#endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
new file mode 100644
index 0000000..fa79057
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
@@ -0,0 +1,333 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_IMPL_H
+#define SECP256K1_SCALAR_IMPL_H
+
+#include "group.h"
+#include "scalar.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low_impl.h"
+#elif defined(USE_SCALAR_4X64)
+#include "scalar_4x64_impl.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32_impl.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+#ifndef USE_NUM_NONE
+/** Convert a scalar into a secp256k1_num via its 32-byte big-endian encoding. */
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
+ unsigned char buf[32];
+ secp256k1_scalar_get_b32(buf, a);
+ secp256k1_num_set_bin(r, buf, 32);
+}
+
+/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ /* Tiny test order: every byte zero except the least significant one. */
+ static const unsigned char order[32] = {
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
+ };
+#else
+ /* The secp256k1 group order n, as 32 big-endian bytes. */
+ static const unsigned char order[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+ };
+#endif
+ secp256k1_num_set_bin(r, order, 32);
+}
+#endif
+
+/** Compute the modular inverse r = x^-1 mod n (n = group order).
+ * In exhaustive-test builds this is a brute-force search over the tiny group.
+ * Otherwise it computes x^(n-2) mod n (Fermat's little theorem; n is prime)
+ * using a fixed addition chain: the sequence of squarings/multiplications is
+ * independent of the value of x. The inline bit-pattern comments give the
+ * window of the binary expansion of n-2 consumed by each multiplication. */
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ int i;
+ *r = 0;
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
+ if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
+ *r = i;
+ /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
+ * have a composite group order; fix it in exhaustive_tests.c). */
+ VERIFY_CHECK(*r != 0);
+}
+#else
+ secp256k1_scalar *t;
+ int i;
+ /* First compute xN as x ^ (2^N - 1) for some values of N,
+ * and uM as x ^ M for some values of M. */
+ secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
+ secp256k1_scalar u2, u5, u9, u11, u13;
+
+ secp256k1_scalar_sqr(&u2, x);
+ secp256k1_scalar_mul(&x2, &u2, x);
+ secp256k1_scalar_mul(&u5, &u2, &x2);
+ secp256k1_scalar_mul(&x3, &u5, &u2);
+ secp256k1_scalar_mul(&u9, &x3, &u2);
+ secp256k1_scalar_mul(&u11, &u9, &u2);
+ secp256k1_scalar_mul(&u13, &u11, &u2);
+
+ secp256k1_scalar_sqr(&x6, &u13);
+ secp256k1_scalar_sqr(&x6, &x6);
+ secp256k1_scalar_mul(&x6, &x6, &u11);
+
+ secp256k1_scalar_sqr(&x8, &x6);
+ secp256k1_scalar_sqr(&x8, &x8);
+ secp256k1_scalar_mul(&x8, &x8, &x2);
+
+ secp256k1_scalar_sqr(&x14, &x8);
+ for (i = 0; i < 5; i++) {
+ secp256k1_scalar_sqr(&x14, &x14);
+ }
+ secp256k1_scalar_mul(&x14, &x14, &x6);
+
+ secp256k1_scalar_sqr(&x28, &x14);
+ for (i = 0; i < 13; i++) {
+ secp256k1_scalar_sqr(&x28, &x28);
+ }
+ secp256k1_scalar_mul(&x28, &x28, &x14);
+
+ secp256k1_scalar_sqr(&x56, &x28);
+ for (i = 0; i < 27; i++) {
+ secp256k1_scalar_sqr(&x56, &x56);
+ }
+ secp256k1_scalar_mul(&x56, &x56, &x28);
+
+ secp256k1_scalar_sqr(&x112, &x56);
+ for (i = 0; i < 55; i++) {
+ secp256k1_scalar_sqr(&x112, &x112);
+ }
+ secp256k1_scalar_mul(&x112, &x112, &x56);
+
+ secp256k1_scalar_sqr(&x126, &x112);
+ for (i = 0; i < 13; i++) {
+ secp256k1_scalar_sqr(&x126, &x126);
+ }
+ secp256k1_scalar_mul(&x126, &x126, &x14);
+
+ /* Then accumulate the final result (t starts at x126). */
+ t = &x126;
+ for (i = 0; i < 3; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u5); /* 101 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u5); /* 101 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+ for (i = 0; i < 4; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 5; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 6; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u5); /* 101 */
+ for (i = 0; i < 3; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+ for (i = 0; i < 6; i++) { /* 000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u5); /* 101 */
+ for (i = 0; i < 10; i++) { /* 0000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 9; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+ for (i = 0; i < 6; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+ for (i = 0; i < 4; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+ for (i = 0; i < 5; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 6; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+ for (i = 0; i < 10; i++) { /* 000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+ for (i = 0; i < 4; i++) {
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+ for (i = 0; i < 6; i++) { /* 00000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 8; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(r, t, &x6); /* 111111 */
+}
+
+/** Return whether the scalar is even (low bit of the least significant limb clear). */
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ return (a->d[0] & 1) == 0;
+}
+#endif
+
+/** Variable-time modular inverse: either the constant-time built-in chain, or
+ * a num-based (GMP-backed) extended-Euclid inverse, selected at build time. */
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(USE_SCALAR_INV_BUILTIN)
+ secp256k1_scalar_inverse(r, x);
+#elif defined(USE_SCALAR_INV_NUM)
+ unsigned char b[32];
+ secp256k1_num n, m;
+ secp256k1_scalar t = *x; /* local copy of x, reused for the verification below */
+ secp256k1_scalar_get_b32(b, &t);
+ secp256k1_num_set_bin(&n, b, 32);
+ secp256k1_scalar_order_get_num(&m);
+ secp256k1_num_mod_inverse(&n, &n, &m);
+ secp256k1_num_get_bin(b, 32, &n);
+ secp256k1_scalar_set_b32(r, b, NULL);
+ /* Verify that the inverse was computed correctly, without GMP code. */
+ secp256k1_scalar_mul(&t, &t, r);
+ CHECK(secp256k1_scalar_is_one(&t));
+#else
+#error "Please select scalar inverse implementation"
+#endif
+}
+
+#ifdef USE_ENDOMORPHISM
+#if defined(EXHAUSTIVE_TEST_ORDER)
+/**
+ * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
+ * full case we don't bother making k1 and k2 be small, we just want them to be
+ * nontrivial to get full test coverage for the exhaustive tests. We therefore
+ * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ /* Arbitrary nontrivial split (see comment above): k2 = k + 5, k1 = k - k2*lambda. */
+ *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
+ *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+}
+#else
+/**
+ * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
+ * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
+ * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
+ *
+ * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+ * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+ * and k2 have a small size.
+ * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+ *
+ * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
+ * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
+ * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ *
+ * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+ * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
+ * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+ *
+ * g1, g2 are precomputed constants used to replace division with a rounded multiplication
+ * when decomposing the scalar for an endomorphism-based point multiplication.
+ *
+ * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
+ * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
+ *
+ * The derivation is described in the paper "Efficient Software Implementation of Public-Key
+ * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
+ * Section 4.3 (here we use a somewhat higher-precision estimate):
+ * d = a1*b2 - b1*a2
+ * g1 = round((2^272)*b2/d)
+ * g2 = round((2^272)*b1/d)
+ *
+ * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
+ * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+ *
+ * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
+ */
+
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ secp256k1_scalar c1, c2;
+ /* -lambda mod n. */
+ static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
+ 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
+ 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
+ );
+ /* -b1 (a positive 128-bit value; see constants in the comment above). */
+ static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
+ 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
+ );
+ /* -b2 mod n. */
+ static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
+ 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
+ );
+ /* g1 = round(2^272 * b2 / d); see derivation in the comment above. */
+ static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
+ 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
+ );
+ /* g2 = round(2^272 * b1 / d). */
+ static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
+ 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
+ );
+ VERIFY_CHECK(r1 != a);
+ VERIFY_CHECK(r2 != a);
+ /* these _var calls are constant time since the shift amount is constant */
+ /* c1 = round(a * g1 / 2^272), c2 = round(a * g2 / 2^272). */
+ secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
+ secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
+ secp256k1_scalar_mul(&c1, &c1, &minus_b1);
+ secp256k1_scalar_mul(&c2, &c2, &minus_b2);
+ /* r2 = -(c1*b1 + c2*b2). */
+ secp256k1_scalar_add(r2, &c1, &c2);
+ /* r1 = a - r2*lambda (mod n), so that r1 + r2*lambda == a. */
+ secp256k1_scalar_mul(r1, r2, &minus_lambda);
+ secp256k1_scalar_add(r1, r1, a);
+}
+#endif
+#endif
+
+#endif /* SECP256K1_SCALAR_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low.h
new file mode 100644
index 0000000..5836feb
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low.h
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_H
+#define SECP256K1_SCALAR_REPR_H
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef uint32_t secp256k1_scalar;
+
+#endif /* SECP256K1_SCALAR_REPR_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h
new file mode 100644
index 0000000..d6fdead
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h
@@ -0,0 +1,120 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SCALAR_REPR_IMPL_H
+#define SECP256K1_SCALAR_REPR_IMPL_H
+
+#include "scalar.h"
+
+#include <string.h>
+
+/** Return whether the scalar is even. */
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ return (*a & 1) == 0;
+}
+
+/* Trivial accessors: the exhaustive-test scalar is a single uint32_t.
+ * NOTE(review): set_int does not reduce v modulo the order (unlike set_u64);
+ * presumably callers only pass small values — confirm at call sites. */
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }
+SECP256K1_INLINE static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v) { *r = v % EXHAUSTIVE_TEST_ORDER; }
+
+/** Extract `count` bits starting at `offset`; bits at or above 32 read as zero. */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ if (offset >= 32) {
+ return 0;
+ }
+ return (*a >> offset) & ((((uint32_t)1) << count) - 1);
+}
+
+/* Variable-time variant; here it simply delegates to the fixed-time version. */
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+}
+
+/* A scalar overflows iff it is not strictly below the group order. */
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
+
+/** r = (a + b) mod the group order. Returns nonzero iff the sum was reduced
+ * (assuming both inputs were already below the order). */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint32_t sum = *a + *b;
+ *r = sum % EXHAUSTIVE_TEST_ORDER;
+ return *r < *b;
+}
+
+/** Conditionally add 2^bit to r. No modular reduction is performed; the VERIFY
+ * build asserts that the result stays below the group order. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ if (flag && bit < 32) {
+ /* Shift an unsigned one: (1 << 31) on a signed int is undefined behavior. */
+ *r += ((uint32_t)1 << bit);
+ }
+#ifdef VERIFY
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+/* Parse 32 big-endian bytes into a scalar, reducing modulo the tiny order as
+ * each byte is folded in (Horner evaluation in base 256). */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
+ int i;
+ *r = 0;
+ for (i = 0; i < 32; i++) {
+ *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+ }
+ /* just deny overflow, it basically always happens */
+ if (overflow) *overflow = 0;
+}
+
+/** Serialize the scalar as 32 big-endian bytes; only the last four bytes can be nonzero. */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ int i;
+ memset(bin, 0, 32);
+ for (i = 0; i < 4; i++) {
+ bin[31 - i] = (unsigned char)(*a >> (8 * i));
+ }
+}
+
+/* Return whether the scalar equals zero. */
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return *a == 0;
+}
+
+/** r = -a mod the group order; zero negates to zero. */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ *r = (*a == 0) ? 0 : EXHAUSTIVE_TEST_ORDER - *a;
+}
+
+/* Return whether the scalar equals one. */
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return *a == 1;
+}
+
+/* A scalar is "high" if it is strictly greater than order/2 (integer division). */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ return *a > EXHAUSTIVE_TEST_ORDER / 2;
+}
+
+/** Negate r in place when flag is set. Returns -1 if negated, 1 otherwise. */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ if (flag) {
+ secp256k1_scalar_negate(r, r);
+ return -1;
+ }
+ return 1;
+}
+
+/* r = (a * b) mod the exhaustive-test group order. */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
+}
+
+/** Shift the scalar right by n bits (1 <= n < 16) and return the bits shifted out. */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int low_bits;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ low_bits = *r & ((1 << n) - 1);
+ *r >>= n;
+ return low_bits;
+}
+
+/* r = a^2 mod the exhaustive-test group order. */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
+}
+
+/* The exhaustive-test scalar always fits in 32 bits, so the "low 128 bits"
+ * are the whole value and the high half is zero. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ *r1 = *a;
+ *r2 = 0;
+}
+
+/* Return whether two scalars are equal. */
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return *a == *b;
+}
+
+/* Simplified stand-in for the ChaCha20-based derivation in the full
+ * implementations: derives two small scalars from the first seed bytes and n. */
+SECP256K1_INLINE static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t n) {
+ *r1 = (seed[0] + n) % EXHAUSTIVE_TEST_ORDER;
+ *r2 = (seed[1] + n) % EXHAUSTIVE_TEST_ORDER;
+}
+
+#endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch.h
new file mode 100644
index 0000000..fef377a
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch.h
@@ -0,0 +1,39 @@
+/**********************************************************************
+ * Copyright (c) 2017 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCRATCH_
+#define _SECP256K1_SCRATCH_
+
+#define SECP256K1_SCRATCH_MAX_FRAMES 5
+
+/* The typedef is used internally; the struct name is used in the public API
+ * (where it is exposed as a different typedef) */
+typedef struct secp256k1_scratch_space_struct {
+ void *data[SECP256K1_SCRATCH_MAX_FRAMES]; /* base pointer of each allocated frame */
+ size_t offset[SECP256K1_SCRATCH_MAX_FRAMES]; /* bytes already handed out of each frame */
+ size_t frame_size[SECP256K1_SCRATCH_MAX_FRAMES]; /* total byte size of each frame */
+ size_t frame; /* number of frames currently allocated */
+ size_t max_size; /* total byte budget across all frames */
+ const secp256k1_callback* error_callback; /* passed to checked_malloc on frame allocation */
+} secp256k1_scratch;
+
+static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size);
+
+static void secp256k1_scratch_destroy(secp256k1_scratch* scratch);
+
+/** Attempts to allocate a new stack frame with `n` available bytes. Returns 1 on success, 0 on failure */
+static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects);
+
+/** Deallocates a stack frame */
+static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch);
+
+/** Returns the maximum allocation the scratch space will allow */
+static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t n_objects);
+
+/** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */
+static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t n);
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch_impl.h
new file mode 100644
index 0000000..abed713
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/scratch_impl.h
@@ -0,0 +1,86 @@
+/**********************************************************************
+ * Copyright (c) 2017 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCRATCH_IMPL_H_
+#define _SECP256K1_SCRATCH_IMPL_H_
+
+#include "scratch.h"
+
+/* Using 16 bytes alignment because common architectures never have alignment
+ * requirements above 8 for any of the types we care about. In addition we
+ * leave some room because currently we don't care about a few bytes.
+ * TODO: Determine this at configure time. */
+#define ALIGNMENT 16
+
+/** Allocate and zero-initialize a scratch space with the given size budget.
+ * Returns NULL when checked_malloc reports failure. */
+static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size) {
+ secp256k1_scratch* scratch = (secp256k1_scratch*)checked_malloc(error_callback, sizeof(*scratch));
+ if (scratch == NULL) {
+ return NULL;
+ }
+ memset(scratch, 0, sizeof(*scratch));
+ scratch->max_size = max_size;
+ scratch->error_callback = error_callback;
+ return scratch;
+}
+
+/** Free a scratch space; all frames must already have been deallocated. */
+static void secp256k1_scratch_destroy(secp256k1_scratch* scratch) {
+ if (scratch == NULL) {
+ return;
+ }
+ VERIFY_CHECK(scratch->frame == 0);
+ free(scratch);
+}
+
+/* Return how many usable bytes can still be requested, reserving ALIGNMENT
+ * bytes of padding per expected object (scratch_alloc rounds every request up
+ * to a multiple of ALIGNMENT). */
+static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t objects) {
+ size_t i = 0;
+ size_t allocated = 0;
+ /* Sum the frames already carved out of the budget. */
+ for (i = 0; i < scratch->frame; i++) {
+ allocated += scratch->frame_size[i];
+ }
+ if (scratch->max_size - allocated <= objects * ALIGNMENT) {
+ return 0;
+ }
+ return scratch->max_size - allocated - objects * ALIGNMENT;
+}
+
+/** Reserve a new frame with n usable bytes plus per-object alignment padding.
+ * Returns 1 on success, 0 if the budget is exceeded or malloc fails. */
+static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects) {
+ VERIFY_CHECK(scratch->frame < SECP256K1_SCRATCH_MAX_FRAMES);
+
+ if (n > secp256k1_scratch_max_allocation(scratch, objects)) {
+ return 0;
+ }
+ /* Padding so that `objects` rounded-up allocations still fit. */
+ n += objects * ALIGNMENT;
+ scratch->data[scratch->frame] = checked_malloc(scratch->error_callback, n);
+ if (scratch->data[scratch->frame] == NULL) {
+ return 0;
+ }
+ scratch->frame_size[scratch->frame] = n;
+ scratch->offset[scratch->frame] = 0;
+ scratch->frame++;
+ return 1;
+}
+
+/* Pop the most recently allocated frame and release its backing memory. */
+static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch) {
+ VERIFY_CHECK(scratch->frame > 0);
+ scratch->frame -= 1;
+ free(scratch->data[scratch->frame]);
+}
+
+/* Bump-allocate `size` (rounded up to ALIGNMENT) zeroed bytes from the current
+ * frame; returns NULL if no frame exists or the frame is full. */
+static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t size) {
+ void *ret;
+ /* Index of the topmost frame. Wraps (size_t) when frame == 0, but that
+ * case is rejected below before the index is ever used. */
+ size_t frame = scratch->frame - 1;
+ /* Round up so consecutive allocations stay ALIGNMENT-aligned. */
+ size = ((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
+
+ if (scratch->frame == 0 || size + scratch->offset[frame] > scratch->frame_size[frame]) {
+ return NULL;
+ }
+ ret = (void *) ((unsigned char *) scratch->data[frame] + scratch->offset[frame]);
+ /* Hand out zeroed memory. */
+ memset(ret, 0, size);
+ scratch->offset[frame] += size;
+
+ return ret;
+}
+
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/secp256k1.c b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/secp256k1.c
new file mode 100644
index 0000000..159768a
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/secp256k1.c
@@ -0,0 +1,637 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include "include/secp256k1.h"
+
+#include "util.h"
+#include "num_impl.h"
+#include "field_impl.h"
+#include "scalar_impl.h"
+#include "group_impl.h"
+#include "ecmult_impl.h"
+#include "ecmult_const_impl.h"
+#include "ecmult_gen_impl.h"
+#include "ecdsa_impl.h"
+#include "eckey_impl.h"
+#include "hash_impl.h"
+#include "scratch_impl.h"
+
+#ifdef ENABLE_MODULE_GENERATOR
+# include "include/secp256k1_generator.h"
+#endif
+
+#ifdef ENABLE_MODULE_COMMITMENT
+# include "include/secp256k1_commitment.h"
+#endif
+
+#ifdef ENABLE_MODULE_RANGEPROOF
+# include "include/secp256k1_rangeproof.h"
+#endif
+
+#ifdef ENABLE_MODULE_BULLETPROOF
+# include "include/secp256k1_bulletproofs.h"
+#endif
+
/* Validate a public-API argument: on failure, invoke the context's
 * illegal-argument callback with the stringified condition and return 0
 * from the calling function. Requires a variable named `ctx` in scope. */
#define ARG_CHECK(cond) do { \
    if (EXPECT(!(cond), 0)) { \
        secp256k1_callback_call(&ctx->illegal_callback, #cond); \
        return 0; \
    } \
} while(0)
+
/* Default illegal-argument handler: report to stderr and abort. Installed in
 * every new context until overridden via secp256k1_context_set_illegal_callback. */
static void default_illegal_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
    abort();
}

static const secp256k1_callback default_illegal_callback = {
    default_illegal_callback_fn,
    NULL
};

/* Default internal-error handler: report to stderr and abort. */
static void default_error_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
    abort();
}

static const secp256k1_callback default_error_callback = {
    default_error_callback_fn,
    NULL
};
+
+
/* A secp256k1 context: the precomputed multiplication tables plus the two
 * user-configurable callbacks. */
struct secp256k1_context_struct {
    secp256k1_ecmult_context ecmult_ctx;         /* table for generic point multiplication (verify) */
    secp256k1_ecmult_gen_context ecmult_gen_ctx; /* table for multiplication by the generator (sign) */
    secp256k1_callback illegal_callback;
    secp256k1_callback error_callback;
};
+
/* Create a context. `flags` must contain SECP256K1_FLAGS_TYPE_CONTEXT and may
 * request sign and/or verify capability; invalid flags trigger the illegal
 * callback and return NULL. */
secp256k1_context* secp256k1_context_create(unsigned int flags) {
    /* checked_malloc invokes the error callback (abort by default) on
     * allocation failure, so ret is non-NULL past this point. */
    secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context));
    ret->illegal_callback = default_illegal_callback;
    ret->error_callback = default_error_callback;

    if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
        secp256k1_callback_call(&ret->illegal_callback,
                                "Invalid flags");
        free(ret);
        return NULL;
    }

    /* Initialize both sub-contexts; tables are only built when requested. */
    secp256k1_ecmult_context_init(&ret->ecmult_ctx);
    secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);

    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
        secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback);
    }
    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
        secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback);
    }

    return ret;
}
+
/* Deep-copy a context, including any built multiplication tables and the
 * current callback configuration. */
secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
    secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context));
    ret->illegal_callback = ctx->illegal_callback;
    ret->error_callback = ctx->error_callback;
    secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback);
    secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback);
    return ret;
}
+
+void secp256k1_context_destroy(secp256k1_context* ctx) {
+ if (ctx != NULL) {
+ secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
+ secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
+
+ free(ctx);
+ }
+}
+
+void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_illegal_callback_fn;
+ }
+ ctx->illegal_callback.fn = fun;
+ ctx->illegal_callback.data = data;
+}
+
+void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_error_callback_fn;
+ }
+ ctx->error_callback.fn = fun;
+ ctx->error_callback.data = data;
+}
+
/* Public wrapper: create a scratch space bounded by max_size bytes, reporting
 * allocation failure through the context's error callback. */
secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* ctx, size_t max_size) {
    VERIFY_CHECK(ctx != NULL);
    return secp256k1_scratch_create(&ctx->error_callback, max_size);
}
+
/* Public wrapper: destroy a scratch space (NULL tolerated). */
void secp256k1_scratch_space_destroy(secp256k1_scratch_space* scratch) {
    secp256k1_scratch_destroy(scratch);
}
+
/* Decode a secp256k1_pubkey into a group element. Returns 0 (via ARG_CHECK,
 * which fires the illegal-argument callback) for the all-zero "invalid"
 * encoding; 1 otherwise. */
static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
    if (sizeof(secp256k1_ge_storage) == 64) {
        /* When the secp256k1_ge_storage type is exactly 64 byte, use its
         * representation inside secp256k1_pubkey, as conversion is very fast.
         * Note that secp256k1_pubkey_save must use the same representation. */
        secp256k1_ge_storage s;
        memcpy(&s, &pubkey->data[0], sizeof(s));
        secp256k1_ge_from_storage(ge, &s);
    } else {
        /* Otherwise, fall back to 32-byte big endian for X and Y. */
        secp256k1_fe x, y;
        secp256k1_fe_set_b32(&x, pubkey->data);
        secp256k1_fe_set_b32(&y, pubkey->data + 32);
        secp256k1_ge_set_xy(ge, &x, &y);
    }
    /* A zero X marks the canonical invalid/cleared pubkey. */
    ARG_CHECK(!secp256k1_fe_is_zero(&ge->x));
    return 1;
}
+
/* Encode a (non-infinity) group element into a secp256k1_pubkey, using the
 * same representation that secp256k1_pubkey_load expects. */
static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) {
    if (sizeof(secp256k1_ge_storage) == 64) {
        /* Fast path: the storage form fits the 64-byte pubkey exactly. */
        secp256k1_ge_storage s;
        secp256k1_ge_to_storage(&s, ge);
        memcpy(&pubkey->data[0], &s, sizeof(s));
    } else {
        /* Portable path: 32-byte big-endian X followed by Y. */
        VERIFY_CHECK(!secp256k1_ge_is_infinity(ge));
        secp256k1_fe_normalize_var(&ge->x);
        secp256k1_fe_normalize_var(&ge->y);
        secp256k1_fe_get_b32(pubkey->data, &ge->x);
        secp256k1_fe_get_b32(pubkey->data + 32, &ge->y);
    }
}
+
+int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
+ secp256k1_ge Q;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(input != NULL);
+ if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
+ return 0;
+ }
+ secp256k1_pubkey_save(pubkey, &Q);
+ secp256k1_ge_clear(&Q);
+ return 1;
+}
+
/* Serialize a pubkey in compressed (33 bytes) or uncompressed (65 bytes) form,
 * selected by flags. On any failure *outputlen is 0 and the buffer is zeroed. */
int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) {
    secp256k1_ge Q;
    size_t len;
    int ret = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(outputlen != NULL);
    ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65));
    /* Capture the caller's capacity, then pre-set failure outputs before any
     * check that can return early. */
    len = *outputlen;
    *outputlen = 0;
    ARG_CHECK(output != NULL);
    memset(output, 0, len);
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION);
    if (secp256k1_pubkey_load(ctx, &Q, pubkey)) {
        ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
        if (ret) {
            *outputlen = len;
        }
    }
    return ret;
}
+
/* Extract the (r, s) scalars from a signature object. */
static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
    (void)ctx;
    if (sizeof(secp256k1_scalar) == 32) {
        /* When the secp256k1_scalar type is exactly 32 byte, use its
         * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
         * Note that secp256k1_ecdsa_signature_save must use the same representation. */
        memcpy(r, &sig->data[0], 32);
        memcpy(s, &sig->data[32], 32);
    } else {
        secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
        secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
    }
}
+
/* Store the (r, s) scalars into a signature object, mirroring the
 * representation read by secp256k1_ecdsa_signature_load. */
static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) {
    if (sizeof(secp256k1_scalar) == 32) {
        memcpy(&sig->data[0], r, 32);
        memcpy(&sig->data[32], s, 32);
    } else {
        secp256k1_scalar_get_b32(&sig->data[0], r);
        secp256k1_scalar_get_b32(&sig->data[32], s);
    }
}
+
+int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
+ secp256k1_scalar r, s;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input != NULL);
+
+ if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ return 0;
+ }
+}
+
+int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) {
+ secp256k1_scalar r, s;
+ int ret = 1;
+ int overflow = 0;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input64 != NULL);
+
+ secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
+ ret &= !overflow;
+ secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
+ ret &= !overflow;
+ if (ret) {
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ }
+ return ret;
+}
+
/* Serialize a signature in DER form. *outputlen is in/out: capacity on entry,
 * bytes written on success. */
int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) {
    secp256k1_scalar r, s;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(output != NULL);
    ARG_CHECK(outputlen != NULL);
    ARG_CHECK(sig != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s);
}
+
/* Serialize a signature as 64 bytes: big-endian r followed by big-endian s. */
int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) {
    secp256k1_scalar r, s;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(output64 != NULL);
    ARG_CHECK(sig != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    secp256k1_scalar_get_b32(&output64[0], &r);
    secp256k1_scalar_get_b32(&output64[32], &s);
    return 1;
}
+
/* Convert a signature to lower-S form: if s is in the upper half of the group
 * order, replace it with -s. Returns 1 iff the input was high-S. sigout may be
 * NULL to only test without writing a result. */
int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) {
    secp256k1_scalar r, s;
    int ret = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(sigin != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin);
    ret = secp256k1_scalar_is_high(&s);
    if (sigout != NULL) {
        if (ret) {
            secp256k1_scalar_negate(&s, &s);
        }
        secp256k1_ecdsa_signature_save(sigout, &r, &s);
    }

    return ret;
}
+
/* Verify an ECDSA signature against a 32-byte message hash and a public key.
 * Requires a verify-capable context. Signatures whose s is in the upper half
 * of the group order are rejected outright (callers must normalize first). */
int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
    secp256k1_ge q;
    secp256k1_scalar r, s;
    secp256k1_scalar m;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(pubkey != NULL);

    secp256k1_scalar_set_b32(&m, msg32, NULL);
    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    /* Short-circuit order matters: the cheap high-S test runs before the
     * pubkey decode, which runs before the expensive sig verification. */
    return (!secp256k1_scalar_is_high(&s) &&
            secp256k1_pubkey_load(ctx, &q, pubkey) &&
            secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
}
+
+static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) {
+ memcpy(buf + *offset, data, len);
+ *offset += len;
+}
+
/* RFC 6979 based nonce function. `counter` selects the counter-th nonce from
 * the HMAC-SHA256 stream, letting signing retry when a nonce is unusable.
 * Always returns 1. */
static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
    unsigned char keydata[112]; /* 32 + 32 + 32 + 16: maximum of all inputs below */
    unsigned int offset = 0;
    secp256k1_rfc6979_hmac_sha256 rng;
    unsigned int i;
    /* We feed a byte array to the PRNG as input, consisting of:
     * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
     * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data.
     * - optionally 16 extra bytes with the algorithm name.
     * Because the arguments have distinct fixed lengths it is not possible for
     * different argument mixtures to emulate each other and result in the same
     * nonces.
     */
    buffer_append(keydata, &offset, key32, 32);
    buffer_append(keydata, &offset, msg32, 32);
    if (data != NULL) {
        buffer_append(keydata, &offset, data, 32);
    }
    if (algo16 != NULL) {
        buffer_append(keydata, &offset, algo16, 16);
    }
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset);
    /* Wipe the key material from the stack as soon as the RNG is seeded. */
    memset(keydata, 0, sizeof(keydata));
    /* Skip counter-1 outputs so retries produce fresh nonces. */
    for (i = 0; i <= counter; i++) {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
    }
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
    return 1;
}
+
/* Public nonce-function handles; the default is the RFC 6979 implementation. */
const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979;
const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979;
+
/* Create an ECDSA signature over a 32-byte message hash. Requires a
 * sign-capable context. Returns 0 (and zeroes the signature) for an invalid
 * secret key or if the nonce function gives up. */
int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
    secp256k1_scalar r, s;
    secp256k1_scalar sec, non, msg;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(signature != NULL);
    ARG_CHECK(seckey != NULL);
    if (noncefp == NULL) {
        noncefp = secp256k1_nonce_function_default;
    }

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
    /* Fail if the secret key is invalid. */
    if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
        unsigned char nonce32[32];
        unsigned int count = 0;
        secp256k1_scalar_set_b32(&msg, msg32, NULL);
        /* Retry loop: keep asking the nonce function (with an increasing
         * counter) until it yields a nonce that produces a valid signature,
         * or until it refuses by returning 0. */
        while (1) {
            ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
            if (!ret) {
                break;
            }
            secp256k1_scalar_set_b32(&non, nonce32, &overflow);
            if (!overflow && !secp256k1_scalar_is_zero(&non)) {
                if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
                    break;
                }
            }
            count++;
        }
        /* Erase secret material from the stack before returning. */
        memset(nonce32, 0, 32);
        secp256k1_scalar_clear(&msg);
        secp256k1_scalar_clear(&non);
        secp256k1_scalar_clear(&sec);
    }
    if (ret) {
        secp256k1_ecdsa_signature_save(signature, &r, &s);
    } else {
        memset(signature, 0, sizeof(*signature));
    }
    return ret;
}
+
+int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) {
+ secp256k1_scalar sec;
+ int ret;
+ int overflow;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ ret = !overflow && !secp256k1_scalar_is_zero(&sec);
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
/* Compute the public key for a secret key. Requires a sign-capable context.
 * Returns 0 (with pubkey zeroed) for an invalid secret key. */
int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
    secp256k1_gej pj;
    secp256k1_ge p;
    secp256k1_scalar sec;
    int overflow;
    int ret = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(pubkey != NULL);
    memset(pubkey, 0, sizeof(*pubkey));
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    ARG_CHECK(seckey != NULL);

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
    /* Non-short-circuiting & keeps the validity check branch-free on the
     * secret data. */
    ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
    if (ret) {
        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
        secp256k1_ge_set_gej(&p, &pj);
        secp256k1_pubkey_save(pubkey, &p);
    }
    secp256k1_scalar_clear(&sec);
    return ret;
}
+
+int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *seckey) {
+ secp256k1_scalar sec;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+ secp256k1_scalar_negate(&sec, &sec);
+ secp256k1_scalar_get_b32(seckey, &sec);
+
+ return 1;
+}
+
+int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *pubkey) {
+ int ret = 0;
+ secp256k1_ge p;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ ret = secp256k1_pubkey_load(ctx, &p, pubkey);
+ memset(pubkey, 0, sizeof(*pubkey));
+ if (ret) {
+ secp256k1_ge_neg(&p, &p);
+ secp256k1_pubkey_save(pubkey, &p);
+ }
+ return ret;
+}
+
/* Add tweak to seckey in place. Fails (zeroing seckey) if the tweak overflows
 * the group order or the result would be an invalid key. */
int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
    secp256k1_scalar term;
    secp256k1_scalar sec;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&term, tweak, &overflow);
    secp256k1_scalar_set_b32(&sec, seckey, NULL);

    ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
    /* Zero the output unconditionally; only overwrite on success. */
    memset(seckey, 0, 32);
    if (ret) {
        secp256k1_scalar_get_b32(seckey, &sec);
    }

    /* Wipe secret material from the stack. */
    secp256k1_scalar_clear(&sec);
    secp256k1_scalar_clear(&term);
    return ret;
}
+
/* Add tweak*G to a public key in place. Requires a verify-capable context.
 * Fails (zeroing pubkey) on overflow, undecodable input, or an invalid result. */
int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
    secp256k1_ge p;
    secp256k1_scalar term;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&term, tweak, &overflow);
    ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
    /* Zero the output unconditionally; only overwrite on success. */
    memset(pubkey, 0, sizeof(*pubkey));
    if (ret) {
        if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) {
            secp256k1_pubkey_save(pubkey, &p);
        } else {
            ret = 0;
        }
    }

    return ret;
}
+
/* Multiply seckey by tweak in place. Fails (zeroing seckey) if the tweak
 * overflows or is otherwise unusable. */
int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
    secp256k1_scalar factor;
    secp256k1_scalar sec;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&factor, tweak, &overflow);
    secp256k1_scalar_set_b32(&sec, seckey, NULL);
    ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
    /* Zero the output unconditionally; only overwrite on success. */
    memset(seckey, 0, 32);
    if (ret) {
        secp256k1_scalar_get_b32(seckey, &sec);
    }

    /* Wipe secret material from the stack. */
    secp256k1_scalar_clear(&sec);
    secp256k1_scalar_clear(&factor);
    return ret;
}
+
/* Multiply a public key by tweak in place. Requires a verify-capable context.
 * Fails (zeroing pubkey) on overflow, undecodable input, or an invalid result. */
int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
    secp256k1_ge p;
    secp256k1_scalar factor;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&factor, tweak, &overflow);
    ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
    /* Zero the output unconditionally; only overwrite on success. */
    memset(pubkey, 0, sizeof(*pubkey));
    if (ret) {
        if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
            secp256k1_pubkey_save(pubkey, &p);
        } else {
            ret = 0;
        }
    }

    return ret;
}
+
/* Re-blind the signing tables with seed32 as a side-channel protection.
 * Requires a sign-capable context. NOTE(review): behavior for a NULL seed32
 * depends on secp256k1_ecmult_gen_blind (not visible here) — confirm. */
int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
    return 1;
}
+
+int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) {
+ size_t i;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+
+ ARG_CHECK(pubnonce != NULL);
+ memset(pubnonce, 0, sizeof(*pubnonce));
+ ARG_CHECK(n >= 1);
+ ARG_CHECK(pubnonces != NULL);
+
+ secp256k1_gej_set_infinity(&Qj);
+
+ for (i = 0; i < n; i++) {
+ secp256k1_pubkey_load(ctx, &Q, pubnonces[i]);
+ secp256k1_gej_add_ge(&Qj, &Qj, &Q);
+ }
+ if (secp256k1_gej_is_infinity(&Qj)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(pubnonce, &Q);
+ return 1;
+}
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_GENERATOR
+# include "modules/generator/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_COMMITMENT
+# include "modules/commitment/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RANGEPROOF
+# include "modules/rangeproof/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_BULLETPROOF
+# include "modules/bulletproofs/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_WHITELIST
+# include "modules/whitelist/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SURJECTIONPROOF
+# include "modules/surjection/main_impl.h"
+#endif
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand.h
new file mode 100644
index 0000000..8259959
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand.h
@@ -0,0 +1,41 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_TESTRAND_H
+#define SECP256K1_TESTRAND_H
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+/* A non-cryptographic RNG used only for test infrastructure. */
+
+/** Seed the pseudorandom number generator for testing. */
+SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
+
+/** Generate a pseudorandom number in the range [0..2**32-1]. */
+static uint32_t secp256k1_rand32(void);
+
+/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
+ * more. */
+static uint32_t secp256k1_rand_bits(int bits);
+
+/** Generate a pseudorandom number in the range [0..range-1]. */
+static uint32_t secp256k1_rand_int(uint32_t range);
+
+/** Generate a pseudorandom 32-byte array. */
+static void secp256k1_rand256(unsigned char *b32);
+
+/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
+static void secp256k1_rand256_test(unsigned char *b32);
+
+/** Generate pseudorandom bytes with long sequences of zero and one bits. */
+static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len);
+
+/** Generate a pseudorandom 64-bit integer in the range min..max, inclusive. */
+static int64_t secp256k1_rands64(uint64_t min, uint64_t max);
+
+#endif /* SECP256K1_TESTRAND_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand_impl.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
new file mode 100644
index 0000000..0db523d
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
@@ -0,0 +1,127 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_TESTRAND_IMPL_H
+#define SECP256K1_TESTRAND_IMPL_H
+
#include <stdint.h>
#include <string.h>
+
+#include "testrand.h"
+#include "hash.h"
+
/* Test-only PRNG state, driven by an RFC6979 HMAC-SHA256 stream. */
static secp256k1_rfc6979_hmac_sha256 secp256k1_test_rng;
/* Buffer of 8 precomputed 32-bit words; `used == 8` means "refill needed". */
static uint32_t secp256k1_test_rng_precomputed[8];
static int secp256k1_test_rng_precomputed_used = 8;
/* Bit reservoir for secp256k1_rand_bits: pending bits and how many are valid. */
static uint64_t secp256k1_test_rng_integer;
static int secp256k1_test_rng_integer_bits_left = 0;
+
/* Seed the test PRNG from 16 bytes of input. */
SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
    secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
}
+
/* Return the next pseudorandom 32-bit word, refilling the 8-word buffer from
 * the HMAC stream when exhausted. Words are taken in native byte order. */
SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
    if (secp256k1_test_rng_precomputed_used == 8) {
        secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
        secp256k1_test_rng_precomputed_used = 0;
    }
    return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
}
+
/* Return `bits` pseudorandom bits (assumes 1 <= bits <= 32; with bits > 32
 * the final mask shift would be undefined). Unused bits are kept in a 64-bit
 * reservoir so no PRNG output is wasted. */
static uint32_t secp256k1_rand_bits(int bits) {
    uint32_t ret;
    if (secp256k1_test_rng_integer_bits_left < bits) {
        /* Top up the reservoir with a fresh 32-bit word. */
        secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left);
        secp256k1_test_rng_integer_bits_left += 32;
    }
    ret = secp256k1_test_rng_integer;
    secp256k1_test_rng_integer >>= bits;
    secp256k1_test_rng_integer_bits_left -= bits;
    ret &= ((~((uint32_t)0)) >> (32 - bits));
    return ret;
}
+
/* Return a uniform pseudorandom integer in [0, range-1] by rejection
 * sampling. */
static uint32_t secp256k1_rand_int(uint32_t range) {
    /* We want a uniform integer between 0 and range-1, inclusive.
     * B is the smallest number such that range <= 2**B.
     * two mechanisms implemented here:
     * - generate B bits numbers until one below range is found, and return it
     * - find the largest multiple M of range that is <= 2**(B+A), generate B+A
     *   bits numbers until one below M is found, and return it modulo range
     * The second mechanism consumes A more bits of entropy in every iteration,
     * but may need fewer iterations due to M being closer to 2**(B+A) then
     * range is to 2**B. The array below (indexed by B) contains a 0 when the
     * first mechanism is to be used, and the number A otherwise.
     */
    static const int addbits[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0};
    uint32_t trange, mult;
    int bits = 0;
    if (range <= 1) {
        return 0;
    }
    /* Compute B = bit length of range-1. */
    trange = range - 1;
    while (trange > 0) {
        trange >>= 1;
        bits++;
    }
    if (addbits[bits]) {
        bits = bits + addbits[bits];
        mult = ((~((uint32_t)0)) >> (32 - bits)) / range;
        trange = range * mult;
    } else {
        trange = range;
        mult = 1;
    }
    /* Rejection loop: terminates with probability 1. */
    while(1) {
        uint32_t x = secp256k1_rand_bits(bits);
        if (x < trange) {
            return (mult == 1) ? x : (x % range);
        }
    }
}
+
/* Fill b32 with 32 pseudorandom bytes straight from the HMAC stream. */
static void secp256k1_rand256(unsigned char *b32) {
    secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
}
+
/* Fill `bytes` with pseudorandom data biased toward long runs of identical
 * bits — useful for hitting carry/boundary edge cases in field/scalar code. */
static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
    size_t bits = 0;
    memset(bytes, 0, len);
    while (bits < len * 8) {
        int now;
        uint32_t val;
        /* Run length between 1 and ~65 bits. */
        now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31;
        /* The bit value for this whole run. */
        val = secp256k1_rand_bits(1);
        while (now > 0 && bits < len * 8) {
            bytes[bits / 8] |= val << (bits % 8);
            now--;
            bits++;
        }
    }
}
+
/* 32-byte variant of secp256k1_rand_bytes_test (long runs of 0s and 1s). */
static void secp256k1_rand256_test(unsigned char *b32) {
    secp256k1_rand_bytes_test(b32, 32);
}
+
/* Return a pseudorandom integer in [min, max] inclusive, by rejection
 * sampling on the top bits of a 64-bit draw. */
SECP256K1_INLINE static int64_t secp256k1_rands64(uint64_t min, uint64_t max) {
    uint64_t range;
    uint64_t r;
    uint64_t clz;
    VERIFY_CHECK(max >= min);
    if (max == min) {
        return min;
    }
    range = max - min;
    /* Shift draws down so they span the smallest power of two >= range+1,
     * then reject anything above range. */
    clz = secp256k1_clz64_var(range);
    do {
        r = ((uint64_t)secp256k1_rand32() << 32) | secp256k1_rand32();
        r >>= clz;
    } while (r > range);
    return min + (int64_t)r;
}
+
+#endif /* SECP256K1_TESTRAND_IMPL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests.c b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests.c
new file mode 100644
index 0000000..7b0af8a
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests.c
@@ -0,0 +1,5231 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <time.h>
+
+#include "secp256k1.c"
+#include "include/secp256k1.h"
+#include "testrand_impl.h"
+
+#ifdef ENABLE_OPENSSL_TESTS
+#include "openssl/bn.h"
+#include "openssl/ec.h"
+#include "openssl/ecdsa.h"
+#include "openssl/obj_mac.h"
+# if OPENSSL_VERSION_NUMBER < 0x10100000L
+void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) {*pr = sig->r; *ps = sig->s;}
+# endif
+#endif
+
+#include "contrib/lax_der_parsing.c"
+#include "contrib/lax_der_privatekey_parsing.c"
+
+#if !defined(VG_CHECK)
+# if defined(VALGRIND)
# include <valgrind/memcheck.h>
+# define VG_UNDEF(x,y) VALGRIND_MAKE_MEM_UNDEFINED((x),(y))
+# define VG_CHECK(x,y) VALGRIND_CHECK_MEM_IS_DEFINED((x),(y))
+# else
+# define VG_UNDEF(x,y)
+# define VG_CHECK(x,y)
+# endif
+#endif
+
/* Global iteration count for the randomized tests. NOTE(review): presumably
 * overridden from the command line in main(), which is outside this chunk. */
static int count = 64;
/* Global context shared by most tests; created in main(). */
static secp256k1_context *ctx = NULL;
+
/* Dummy illegal-argument callback: increments the int32_t counter that
 * `data` points to; the message is ignored. */
static void counting_illegal_callback_fn(const char* str, void* data) {
    int32_t *counter = data;
    (void)str;
    ++*counter;
}
+
/* Dummy illegal-argument callback: decrements the int32_t counter that
 * `data` points to; the message is ignored. */
static void uncounting_illegal_callback_fn(const char* str, void* data) {
    int32_t *counter = data;
    (void)str;
    --*counter;
}
+
+void random_field_element_test(secp256k1_fe *fe) {
+ do {
+ unsigned char b32[32];
+ secp256k1_rand256_test(b32);
+ if (secp256k1_fe_set_b32(fe, b32)) {
+ break;
+ }
+ } while(1);
+}
+
/* Randomize the internal magnitude of fe (0..8) without changing its value:
 * add (n-1) times a normalized representation of zero. */
void random_field_element_magnitude(secp256k1_fe *fe) {
    secp256k1_fe zero;
    int n = secp256k1_rand_int(9);
    secp256k1_fe_normalize(fe);
    if (n == 0) {
        return;
    }
    /* -0 has magnitude 1; scaling it by (n-1) and adding it bumps fe's
     * magnitude to exactly n while leaving its value unchanged. */
    secp256k1_fe_clear(&zero);
    secp256k1_fe_negate(&zero, &zero, 0);
    secp256k1_fe_mul_int(&zero, n - 1);
    secp256k1_fe_add(fe, &zero);
    VERIFY_CHECK(fe->magnitude == n);
}
+
+void random_group_element_test(secp256k1_ge *ge) {
+ secp256k1_fe fe;
+ do {
+ random_field_element_test(&fe);
+ if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) {
+ secp256k1_fe_normalize(&ge->y);
+ break;
+ }
+ } while(1);
+}
+
/* Build a Jacobian representation of ge with a random non-zero denominator z:
 * (x*z^2, y*z^3, z) denotes the same affine point. */
void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
    secp256k1_fe z2, z3;
    do {
        random_field_element_test(&gej->z);
        if (!secp256k1_fe_is_zero(&gej->z)) {
            break;
        }
    } while(1);
    secp256k1_fe_sqr(&z2, &gej->z);
    secp256k1_fe_mul(&z3, &z2, &gej->z);
    secp256k1_fe_mul(&gej->x, &ge->x, &z2);
    secp256k1_fe_mul(&gej->y, &ge->y, &z3);
    gej->infinity = ge->infinity;
}
+
+void random_scalar_order_test(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256_test(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+void random_scalar_order(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
/* Tests for the utility helpers: count-leading-zeros, sign/absolute-value
 * splitting of int64, and the bounded random generator secp256k1_rands64. */
void run_util_tests(void) {
    int i;
    uint64_t r;
    uint64_t r2;
    uint64_t r3;
    int64_t s;
    /* clz64: fixed points and boundary values. */
    CHECK(secp256k1_clz64_var(0) == 64);
    CHECK(secp256k1_clz64_var(1) == 63);
    CHECK(secp256k1_clz64_var(2) == 62);
    CHECK(secp256k1_clz64_var(3) == 62);
    CHECK(secp256k1_clz64_var(~0ULL) == 0);
    CHECK(secp256k1_clz64_var((~0ULL) - 1) == 0);
    CHECK(secp256k1_clz64_var((~0ULL) >> 1) == 1);
    CHECK(secp256k1_clz64_var((~0ULL) >> 2) == 2);
    /* sign_and_abs64: returns the sign bit and stores |x|, including the
     * INT64_MIN case whose magnitude does not fit in int64_t. */
    CHECK(secp256k1_sign_and_abs64(&r, INT64_MAX) == 0);
    CHECK(r == INT64_MAX);
    CHECK(secp256k1_sign_and_abs64(&r, INT64_MAX - 1) == 0);
    CHECK(r == INT64_MAX - 1);
    CHECK(secp256k1_sign_and_abs64(&r, INT64_MIN) == 1);
    CHECK(r == (uint64_t)INT64_MAX + 1);
    CHECK(secp256k1_sign_and_abs64(&r, INT64_MIN + 1) == 1);
    CHECK(r == (uint64_t)INT64_MAX);
    CHECK(secp256k1_sign_and_abs64(&r, 0) == 0);
    CHECK(r == 0);
    CHECK(secp256k1_sign_and_abs64(&r, 1) == 0);
    CHECK(r == 1);
    CHECK(secp256k1_sign_and_abs64(&r, -1) == 1);
    CHECK(r == 1);
    CHECK(secp256k1_sign_and_abs64(&r, 2) == 0);
    CHECK(r == 2);
    CHECK(secp256k1_sign_and_abs64(&r, -2) == 1);
    CHECK(r == 2);
    /* Randomized round-trip checks. */
    for (i = 0; i < 10; i++) {
        CHECK(secp256k1_clz64_var((~0ULL) - secp256k1_rand32()) == 0);
        r = ((uint64_t)secp256k1_rand32() << 32) | secp256k1_rand32();
        r2 = secp256k1_rands64(0, r);
        CHECK(r2 <= r);
        r3 = secp256k1_rands64(r2, r);
        CHECK((r3 >= r2) && (r3 <= r));
        r = secp256k1_rands64(0, INT64_MAX);
        s = (int64_t)r * (secp256k1_rand32()&1?-1:1);
        CHECK(secp256k1_sign_and_abs64(&r2, s) == (s < 0));
        CHECK(r2 == r);
    }
}
+
+/* Exercise the context API: creation with every capability-flag combination,
+ * cloning, illegal/error callback plumbing, and the rule that operations
+ * requiring a capability the context lacks must fail via the illegal-argument
+ * callback rather than crash. */
+void run_context_tests(void) {
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey zero_pubkey;
+ secp256k1_ecdsa_signature sig;
+ unsigned char ctmp[32];
+ int32_t ecount;
+ int32_t ecount2;
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar msg, key, nonce;
+ secp256k1_scalar sigr, sigs;
+
+ memset(&zero_pubkey, 0, sizeof(zero_pubkey));
+
+ /* ecount counts illegal-callback hits on vrfy; ecount2 counts hits on
+  * sign, and starts at 10 so a miscounted callback on the wrong context
+  * cannot be confused with one on the right context. */
+ ecount = 0;
+ ecount2 = 10;
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL);
+ CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+
+ /*** clone and destroy all of them to make sure cloning was complete ***/
+ {
+ secp256k1_context *ctx_tmp;
+
+ ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp);
+ }
+
+ /* Verify that the error callback makes it across the clone. */
+ CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+ /* And that it resets back to default. */
+ secp256k1_context_set_error_callback(sign, NULL, NULL);
+ CHECK(vrfy->error_callback.fn == sign->error_callback.fn);
+
+ /*** attempt to use them ***/
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+
+ /* Verify context-type checking illegal-argument errors. */
+ memset(ctmp, 1, 32);
+ CHECK(secp256k1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0);
+ CHECK(ecount == 1);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(sign, &pubkey, ctmp) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0);
+ CHECK(ecount == 2);
+ VG_UNDEF(&sig, sizeof(sig));
+ CHECK(secp256k1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1);
+ VG_CHECK(&sig, sizeof(sig));
+ CHECK(ecount2 == 10);
+ CHECK(secp256k1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0);
+ CHECK(ecount2 == 11);
+ CHECK(secp256k1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0);
+ CHECK(ecount2 == 12);
+ CHECK(secp256k1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0);
+ CHECK(ecount2 == 13);
+ /* Negation needs no capability, so it succeeds on both contexts. */
+ CHECK(secp256k1_ec_pubkey_negate(vrfy, &pubkey) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_negate(sign, &pubkey) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_negate(sign, NULL) == 0);
+ CHECK(ecount2 == 14);
+ CHECK(secp256k1_ec_pubkey_negate(vrfy, &zero_pubkey) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_context_randomize(sign, NULL) == 1);
+ CHECK(ecount2 == 14);
+ secp256k1_context_set_illegal_callback(vrfy, NULL, NULL);
+ secp256k1_context_set_illegal_callback(sign, NULL, NULL);
+
+ /* This shouldn't leak memory, due to already-set tests. */
+ secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL);
+ secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL);
+
+ /* obtain a working nonce */
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try signing */
+ CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+ CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try verifying */
+ CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+
+ /* cleanup */
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+ /* Defined as no-op. */
+ secp256k1_context_destroy(NULL);
+}
+
+/* Exercise the scratch-space API: creation, the effect of pushing/popping a
+ * stack frame on the maximum allocation, allocation inside/outside a frame,
+ * and cleanup. */
+void run_scratch_tests(void) {
+ int32_t ecount = 0;
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_scratch_space *scratch;
+
+ /* Test public API */
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+
+ scratch = secp256k1_scratch_space_create(none, 1000);
+ CHECK(scratch != NULL);
+ CHECK(ecount == 0);
+
+ /* Test internal API */
+ CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
+ CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 1000);
+
+ /* Allocating 500 bytes with no frame fails */
+ CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
+ CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
+
+ /* ...but pushing a new stack frame does affect the max allocation.
+  * Fix: the original passed the constant expression (1 == 1) as the
+  * objects argument instead of comparing the return value to 1; this
+  * matches the failing-frame check below. */
+ CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1) == 1);
+ CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 500); /* 500 - ALIGNMENT */
+ CHECK(secp256k1_scratch_alloc(scratch, 500) != NULL);
+ CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
+
+ /* A second 500-byte frame does not fit in the remaining space. */
+ CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1) == 0);
+
+ /* ...and this effect is undone by popping the frame */
+ secp256k1_scratch_deallocate_frame(scratch);
+ CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
+ CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
+
+ /* cleanup */
+ secp256k1_scratch_space_destroy(scratch);
+ secp256k1_context_destroy(none);
+}
+
+/***** HASH TESTS *****/
+
+/* Check the SHA-256 implementation against eight fixed known-answer vectors
+ * (the classic example messages: empty string, "abc", etc.), each hashed
+ * both in a single write and split at a random byte boundary to exercise
+ * the internal buffering of secp256k1_sha256_write. */
+void run_sha256_tests(void) {
+ static const char *inputs[8] = {
+ "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe",
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ "For this sample, this 63-byte string will be used as input data",
+ "This is exactly 64 bytes long, not counting the terminating byte"
+ };
+ static const unsigned char outputs[8][32] = {
+ {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
+ {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad},
+ {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50},
+ {0xf3, 0x0c, 0xeb, 0x2b, 0xb2, 0x82, 0x9e, 0x79, 0xe4, 0xca, 0x97, 0x53, 0xd3, 0x5a, 0x8e, 0xcc, 0x00, 0x26, 0x2d, 0x16, 0x4c, 0xc0, 0x77, 0x08, 0x02, 0x95, 0x38, 0x1c, 0xbd, 0x64, 0x3f, 0x0d},
+ {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30},
+ {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1},
+ {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42},
+ {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8}
+ };
+ int i;
+ for (i = 0; i < 8; i++) {
+ unsigned char out[32];
+ secp256k1_sha256 hasher;
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ /* Re-hash the same input split at a random point; the digest must be
+  * independent of how the input was chunked. */
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand_int(strlen(inputs[i]));
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+/* Check HMAC-SHA256 against fixed known-answer vectors (these match the
+ * RFC 4231 test cases, including keys longer than the 64-byte block size
+ * that must be hashed down first). Each message is also fed in two chunks
+ * split at a random point to exercise the streaming interface. */
+void run_hmac_sha256_tests(void) {
+ static const char *keys[6] = {
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ "\x4a\x65\x66\x65",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ };
+ static const char *inputs[6] = {
+ "\x48\x69\x20\x54\x68\x65\x72\x65",
+ "\x77\x68\x61\x74\x20\x64\x6f\x20\x79\x61\x20\x77\x61\x6e\x74\x20\x66\x6f\x72\x20\x6e\x6f\x74\x68\x69\x6e\x67\x3f",
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+ "\x54\x65\x73\x74\x20\x55\x73\x69\x6e\x67\x20\x4c\x61\x72\x67\x65\x72\x20\x54\x68\x61\x6e\x20\x42\x6c\x6f\x63\x6b\x2d\x53\x69\x7a\x65\x20\x4b\x65\x79\x20\x2d\x20\x48\x61\x73\x68\x20\x4b\x65\x79\x20\x46\x69\x72\x73\x74",
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x61\x20\x74\x65\x73\x74\x20\x75\x73\x69\x6e\x67\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x6b\x65\x79\x20\x61\x6e\x64\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x64\x61\x74\x61\x2e\x20\x54\x68\x65\x20\x6b\x65\x79\x20\x6e\x65\x65\x64\x73\x20\x74\x6f\x20\x62\x65\x20\x68\x61\x73\x68\x65\x64\x20\x62\x65\x66\x6f\x72\x65\x20\x62\x65\x69\x6e\x67\x20\x75\x73\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\x48\x4d\x41\x43\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x2e"
+ };
+ static const unsigned char outputs[6][32] = {
+ {0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7},
+ {0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43},
+ {0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe},
+ {0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b},
+ {0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54},
+ {0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2}
+ };
+ int i;
+ for (i = 0; i < 6; i++) {
+ secp256k1_hmac_sha256 hasher;
+ unsigned char out[32];
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ /* Same MAC must result regardless of how the message is chunked. */
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand_int(strlen(inputs[i]));
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+/* Known-answer tests for the RFC6979-style HMAC-SHA256 generator: three
+ * consecutive 32-byte outputs are pinned for two fixed seed keys. key1
+ * deliberately carries a 65th byte (0x00) so that seeding with 65 bytes
+ * instead of 64 must produce entirely different output. */
+void run_rfc6979_hmac_sha256_tests(void) {
+ static const unsigned char key1[65] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x4b, 0xf5, 0x12, 0x2f, 0x34, 0x45, 0x54, 0xc5, 0x3b, 0xde, 0x2e, 0xbb, 0x8c, 0xd2, 0xb7, 0xe3, 0xd1, 0x60, 0x0a, 0xd6, 0x31, 0xc3, 0x85, 0xa5, 0xd7, 0xcc, 0xe2, 0x3c, 0x77, 0x85, 0x45, 0x9a, 0};
+ static const unsigned char out1[3][32] = {
+ {0x4f, 0xe2, 0x95, 0x25, 0xb2, 0x08, 0x68, 0x09, 0x15, 0x9a, 0xcd, 0xf0, 0x50, 0x6e, 0xfb, 0x86, 0xb0, 0xec, 0x93, 0x2c, 0x7b, 0xa4, 0x42, 0x56, 0xab, 0x32, 0x1e, 0x42, 0x1e, 0x67, 0xe9, 0xfb},
+ {0x2b, 0xf0, 0xff, 0xf1, 0xd3, 0xc3, 0x78, 0xa2, 0x2d, 0xc5, 0xde, 0x1d, 0x85, 0x65, 0x22, 0x32, 0x5c, 0x65, 0xb5, 0x04, 0x49, 0x1a, 0x0c, 0xbd, 0x01, 0xcb, 0x8f, 0x3a, 0xa6, 0x7f, 0xfd, 0x4a},
+ {0xf5, 0x28, 0xb4, 0x10, 0xcb, 0x54, 0x1f, 0x77, 0x00, 0x0d, 0x7a, 0xfb, 0x6c, 0x5b, 0x53, 0xc5, 0xc4, 0x71, 0xea, 0xb4, 0x3e, 0x46, 0x6d, 0x9a, 0xc5, 0x19, 0x0c, 0x39, 0xc8, 0x2f, 0xd8, 0x2e}
+ };
+
+ static const unsigned char key2[64] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55};
+ static const unsigned char out2[3][32] = {
+ {0x9c, 0x23, 0x6c, 0x16, 0x5b, 0x82, 0xae, 0x0c, 0xd5, 0x90, 0x65, 0x9e, 0x10, 0x0b, 0x6b, 0xab, 0x30, 0x36, 0xe7, 0xba, 0x8b, 0x06, 0x74, 0x9b, 0xaf, 0x69, 0x81, 0xe1, 0x6f, 0x1a, 0x2b, 0x95},
+ {0xdf, 0x47, 0x10, 0x61, 0x62, 0x5b, 0xc0, 0xea, 0x14, 0xb6, 0x82, 0xfe, 0xee, 0x2c, 0x9c, 0x02, 0xf2, 0x35, 0xda, 0x04, 0x20, 0x4c, 0x1d, 0x62, 0xa1, 0x53, 0x6c, 0x6e, 0x17, 0xae, 0xd7, 0xa9},
+ {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94}
+ };
+
+ secp256k1_rfc6979_hmac_sha256 rng;
+ unsigned char out[32];
+ int i;
+
+ /* 64-byte seed from key1: outputs must match out1 exactly. */
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ /* Including key1's trailing 0x00 (65 bytes) must change every output. */
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) != 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ /* Second fixed seed: outputs must match out2 exactly. */
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out2[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+}
+
+/***** RANDOM TESTS *****/
+
+/* Statistical test that the RNG produces every value in each small bit
+ * window: when rand32 is nonzero, secp256k1_rand32 is sampled; otherwise
+ * secp256k1_rand_bits(bits). Fails (probabilistically, < 1e-9 false
+ * negative) if any usebits-wide window ever misses an outcome. */
+void test_rand_bits(int rand32, int bits) {
+ /* (1-1/2^B)^rounds[B] < 1/10^9, so rounds is the number of iterations to
+ * get a false negative chance below once in a billion */
+ static const unsigned int rounds[7] = {1, 30, 73, 156, 322, 653, 1316};
+ /* We try multiplying the results with various odd numbers, which shouldn't
+ * influence the uniform distribution modulo a power of 2. */
+ static const uint32_t mults[6] = {1, 3, 21, 289, 0x9999, 0x80402011};
+ /* We only select up to 6 bits from the output to analyse */
+ unsigned int usebits = bits > 6 ? 6 : bits;
+ unsigned int maxshift = bits - usebits;
+ /* For each of the maxshift+1 usebits-bit sequences inside a bits-bit
+ number, track all observed outcomes, one per bit in a uint64_t.
+ (6 rows: one per multiplier; 27 columns: maxshift <= 32-6 = 26.) */
+ uint64_t x[6][27] = {{0}};
+ unsigned int i, shift, m;
+ /* Multiply the output of all rand calls with the odd number m, which
+ should not change the uniformity of its distribution. */
+ for (i = 0; i < rounds[usebits]; i++) {
+ uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits));
+ /* The generator must never set bits above the requested width. */
+ CHECK((((uint64_t)r) >> bits) == 0);
+ for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
+ uint32_t rm = r * mults[m];
+ for (shift = 0; shift <= maxshift; shift++) {
+ x[m][shift] |= (((uint64_t)1) << ((rm >> shift) & ((1 << usebits) - 1)));
+ }
+ }
+ }
+ for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
+ for (shift = 0; shift <= maxshift; shift++) {
+ /* Test that the lower usebits bits of x[shift] are 1 */
+ CHECK(((~x[m][shift]) << (64 - (1 << usebits))) == 0);
+ }
+ }
+}
+
+/* Subrange must be a whole divisor of range, and at most 64 */
+/* Statistical test that secp256k1_rand_int(range) stays below range and
+ * hits every residue class modulo subrange. Subrange must be a whole
+ * divisor of range, and at most 64 (one bit per class in a uint64_t). */
+void test_rand_int(uint32_t range, uint32_t subrange) {
+ /* Enough samples that (1-1/subrange)^rounds < 1/10^9, i.e. the chance
+  * of a false negative is below one in a billion. */
+ int rounds = (subrange * 2073) / 100;
+ uint64_t seen = 0;
+ int iter;
+ CHECK((range % subrange) == 0);
+ for (iter = 0; iter < rounds; iter++) {
+ uint32_t sample = secp256k1_rand_int(range);
+ CHECK(sample < range);
+ seen |= ((uint64_t)1) << (sample % subrange);
+ }
+ /* Every residue class modulo subrange must have been observed. */
+ CHECK(((~seen) << (64 - subrange)) == 0);
+}
+
+/* Run the bit-uniformity test for secp256k1_rand32 and for
+ * secp256k1_rand_bits at every width from 1 to 32. The loop variable is
+ * int (not size_t) to match test_rand_bits' int parameter and avoid an
+ * implicit narrowing conversion. */
+void run_rand_bits(void) {
+ int b;
+ test_rand_bits(1, 32);
+ for (b = 1; b <= 32; b++) {
+ test_rand_bits(0, b);
+ }
+}
+
+/* Check uniformity of secp256k1_rand_int modulo several divisors, over a
+ * selection of ranges (each range a multiple of the divisor). */
+void run_rand_int(void) {
+ static const uint32_t ms[] = {1, 3, 17, 1000, 13771, 999999, 33554432};
+ static const uint32_t ss[] = {1, 3, 6, 9, 13, 31, 64};
+ unsigned int i, j;
+ for (i = 0; i < sizeof(ms) / sizeof(ms[0]); i++) {
+ for (j = 0; j < sizeof(ss) / sizeof(ss[0]); j++) {
+ test_rand_int(ms[i] * ss[j], ss[j]);
+ }
+ }
+}
+
+/***** NUM TESTS *****/
+
+#ifndef USE_NUM_NONE
+/* Negate *num with probability 1/2. */
+void random_num_negate(secp256k1_num *num) {
+ int flip = secp256k1_rand_bits(1);
+ if (flip != 0) {
+ secp256k1_num_negate(num);
+ }
+}
+
+/* Set *num to a random value in [0, order), drawn via the test-quality
+ * scalar sampler. */
+void random_num_order_test(secp256k1_num *num) {
+ secp256k1_scalar tmp;
+ random_scalar_order_test(&tmp);
+ secp256k1_scalar_get_num(num, &tmp);
+}
+
+/* Set *num to a random value in [0, order), drawn via the fast scalar
+ * sampler. */
+void random_num_order(secp256k1_num *num) {
+ secp256k1_scalar tmp;
+ random_scalar_order(&tmp);
+ secp256k1_scalar_get_num(num, &tmp);
+}
+
+/* Check negation identities on a random num R: R - R == 0, R + (-R) == 0,
+ * negation flips the sign bit, and double negation restores the value. */
+void test_num_negate(void) {
+ secp256k1_num n1;
+ secp256k1_num n2;
+ random_num_order_test(&n1); /* n1 = R */
+ random_num_negate(&n1);
+ secp256k1_num_copy(&n2, &n1); /* n2 = R */
+ secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(!secp256k1_num_is_zero(&n1));
+ secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2));
+ secp256k1_num_negate(&n1); /* n1 = R */
+ CHECK(secp256k1_num_eq(&n1, &n2));
+}
+
+/* Check commutativity and the add/sub inverse relations on two random
+ * (possibly negated) nums, then verify that is_one accepts 1 and rejects
+ * 2^n + 1 for n up to 250. */
+void test_num_add_sub(void) {
+ int i;
+ secp256k1_scalar s;
+ secp256k1_num n1;
+ secp256k1_num n2;
+ secp256k1_num n1p2, n2p1, n1m2, n2m1;
+ random_num_order_test(&n1); /* n1 = R1 */
+ if (secp256k1_rand_bits(1)) {
+ random_num_negate(&n1);
+ }
+ random_num_order_test(&n2); /* n2 = R2 */
+ if (secp256k1_rand_bits(1)) {
+ random_num_negate(&n2);
+ }
+ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
+ secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */
+ secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */
+ secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */
+ CHECK(secp256k1_num_eq(&n1p2, &n2p1));
+ CHECK(!secp256k1_num_eq(&n1p2, &n1m2));
+ secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1m2));
+ CHECK(!secp256k1_num_eq(&n2m1, &n1));
+ secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1));
+ CHECK(!secp256k1_num_eq(&n2p1, &n1));
+ secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2p1, &n1));
+
+ /* check is_one */
+ secp256k1_scalar_set_int(&s, 1);
+ secp256k1_scalar_get_num(&n1, &s);
+ CHECK(secp256k1_num_is_one(&n1));
+ /* check that 2^n + 1 is never 1 */
+ secp256k1_scalar_get_num(&n2, &s);
+ for (i = 0; i < 250; ++i) {
+ secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */
+ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */
+ CHECK(!secp256k1_num_is_one(&n1p2));
+ }
+}
+
+/* Check basic modulus properties: 0 mod m == 0 for a random m, and
+ * x mod 1 == 0 even when x has been grown past 256 bits. */
+void test_num_mod(void) {
+ int i;
+ secp256k1_scalar s;
+ secp256k1_num order, n;
+
+ /* check that 0 mod anything is 0 */
+ random_scalar_order_test(&s);
+ secp256k1_scalar_get_num(&order, &s);
+ secp256k1_scalar_set_int(&s, 0);
+ secp256k1_scalar_get_num(&n, &s);
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+
+ /* check that anything mod 1 is 0 */
+ secp256k1_scalar_set_int(&s, 1);
+ secp256k1_scalar_get_num(&order, &s);
+ secp256k1_scalar_get_num(&n, &s);
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+
+ /* check that increasing the number past 2^256 does not break this.
+  * Note: order is still 1 here (set above), so this verifies that a
+  * multi-word value reduces to 0 modulo 1. */
+ random_scalar_order_test(&s);
+ secp256k1_scalar_get_num(&n, &s);
+ /* multiply by 2^8, which'll test this case with high probability */
+ for (i = 0; i < 8; ++i) {
+ secp256k1_num_add(&n, &n, &n);
+ }
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+}
+
+/* Check secp256k1_num_jacobi: against a table of small values mod 5,
+ * against constructed residues/nonresidues mod 5, and against squares,
+ * non-squares, multiples, and order-1 modulo the secp256k1 group order. */
+void test_num_jacobi(void) {
+ secp256k1_scalar sqr;
+ secp256k1_scalar small;
+ secp256k1_scalar five; /* five is not a quadratic residue */
+ secp256k1_num order, n;
+ int i;
+ /* squares mod 5 are 1, 4 */
+ const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 };
+
+ /* check some small values with 5 as the order */
+ secp256k1_scalar_set_int(&five, 5);
+ secp256k1_scalar_get_num(&order, &five);
+ for (i = 0; i < 10; ++i) {
+ secp256k1_scalar_set_int(&small, i);
+ secp256k1_scalar_get_num(&n, &small);
+ CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]);
+ }
+
+ /** test large values with 5 as group order */
+ secp256k1_scalar_get_num(&order, &five);
+ /* we first need a scalar which is not a multiple of 5 */
+ do {
+ secp256k1_num fiven;
+ random_scalar_order_test(&sqr);
+ secp256k1_scalar_get_num(&fiven, &five);
+ secp256k1_scalar_get_num(&n, &sqr);
+ secp256k1_num_mod(&n, &fiven);
+ } while (secp256k1_num_is_zero(&n));
+ /* next force it to be a residue. 2 is a nonresidue mod 5 so we can
+ * just multiply by two, i.e. add the number to itself */
+ if (secp256k1_num_jacobi(&n, &order) == -1) {
+ secp256k1_num_add(&n, &n, &n);
+ }
+
+ /* test residue */
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+ /* test nonresidue (doubling a residue multiplies it by nonresidue 2) */
+ secp256k1_num_add(&n, &n, &n);
+ CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+
+ /** test with secp group order as order */
+ secp256k1_scalar_order_get_num(&order);
+ random_scalar_order_test(&sqr);
+ secp256k1_scalar_sqr(&sqr, &sqr);
+ /* test residue */
+ secp256k1_scalar_get_num(&n, &sqr);
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+ /* test nonresidue */
+ secp256k1_scalar_mul(&sqr, &sqr, &five);
+ secp256k1_scalar_get_num(&n, &sqr);
+ CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+ /* test multiple of the order*/
+ CHECK(secp256k1_num_jacobi(&order, &order) == 0);
+
+ /* check one less than the order */
+ secp256k1_scalar_set_int(&small, 1);
+ secp256k1_scalar_get_num(&n, &small);
+ secp256k1_num_sub(&n, &order, &n);
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */
+}
+
+/* Repeatedly run every secp256k1_num unit test, 100*count iterations. */
+void run_num_smalltests(void) {
+ int iter;
+ for (iter = 0; iter < 100 * count; iter++) {
+ test_num_negate();
+ test_num_add_sub();
+ test_num_mod();
+ test_num_jacobi();
+ }
+}
+#endif
+
+/***** SCALAR TESTS *****/
+
+void scalar_test(void) {
+ secp256k1_scalar s;
+ secp256k1_scalar s1;
+ secp256k1_scalar s2;
+#ifndef USE_NUM_NONE
+ secp256k1_num snum, s1num, s2num;
+ secp256k1_num order, half_order;
+#endif
+ unsigned char c[32];
+
+ /* Set 's' to a random scalar, with value 'snum'. */
+ random_scalar_order_test(&s);
+
+ /* Set 's1' to a random scalar, with value 's1num'. */
+ random_scalar_order_test(&s1);
+
+ /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */
+ random_scalar_order_test(&s2);
+ secp256k1_scalar_get_b32(c, &s2);
+
+#ifndef USE_NUM_NONE
+ secp256k1_scalar_get_num(&snum, &s);
+ secp256k1_scalar_get_num(&s1num, &s1);
+ secp256k1_scalar_get_num(&s2num, &s2);
+
+ secp256k1_scalar_order_get_num(&order);
+ half_order = order;
+ secp256k1_num_shift(&half_order, 1);
+#endif
+
+ {
+ int i;
+ /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ secp256k1_scalar_set_int(&n, 0);
+ for (i = 0; i < 256; i += 4) {
+ secp256k1_scalar t;
+ int j;
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
+ for (j = 0; j < 4; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+ {
+ /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ int i = 0;
+ secp256k1_scalar_set_int(&n, 0);
+ while (i < 256) {
+ secp256k1_scalar t;
+ int j;
+ int now = secp256k1_rand_int(15) + 1;
+ if (now + i > 256) {
+ now = 256 - i;
+ }
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now));
+ for (j = 0; j < now; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ i += now;
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
+ secp256k1_num rnum;
+ secp256k1_num r2num;
+ secp256k1_scalar r;
+ secp256k1_num_add(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_add(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ }
+
+ {
+ /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
+ secp256k1_scalar r;
+ secp256k1_num r2num;
+ secp256k1_num rnum;
+ secp256k1_num_mul(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_mul(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ /* The result can only be zero if at least one of the factors was zero. */
+ CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
+ /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */
+ CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
+ CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
+ }
+
+ {
+ secp256k1_scalar neg;
+ secp256k1_num negnum;
+ secp256k1_num negnum2;
+ /* Check that comparison with zero matches comparison with zero on the number. */
+ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
+ /* Check that comparison with the half order is equal to testing for high scalar. */
+ CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0));
+ secp256k1_scalar_negate(&neg, &s);
+ secp256k1_num_sub(&negnum, &order, &snum);
+ secp256k1_num_mod(&negnum, &order);
+ /* Check that comparison with the half order is equal to testing for high scalar after negation. */
+ CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0));
+ /* Negating should change the high property, unless the value was already zero. */
+ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
+ secp256k1_scalar_get_num(&negnum2, &neg);
+ /* Negating a scalar should be equal to (order - n) mod order on the number. */
+ CHECK(secp256k1_num_eq(&negnum, &negnum2));
+ secp256k1_scalar_add(&neg, &neg, &s);
+ /* Adding a number to its negation should result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ secp256k1_scalar_negate(&neg, &neg);
+ /* Negating zero should still result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ }
+
+ {
+ /* Test secp256k1_scalar_mul_shift_var. */
+ secp256k1_scalar r;
+ secp256k1_num one;
+ secp256k1_num rnum;
+ secp256k1_num rnum2;
+ unsigned char cone[1] = {0x01};
+ unsigned int shift = 256 + secp256k1_rand_int(257);
+ secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
+ secp256k1_num_mul(&rnum, &s1num, &s2num);
+ secp256k1_num_shift(&rnum, shift - 1);
+ secp256k1_num_set_bin(&one, cone, 1);
+ secp256k1_num_add(&rnum, &rnum, &one);
+ secp256k1_num_shift(&rnum, 1);
+ secp256k1_scalar_get_num(&rnum2, &r);
+ CHECK(secp256k1_num_eq(&rnum, &rnum2));
+ }
+
+ {
+ /* test secp256k1_scalar_shr_int */
+ secp256k1_scalar r;
+ int i;
+ random_scalar_order_test(&r);
+ for (i = 0; i < 100; ++i) {
+ int low;
+ int shift = 1 + secp256k1_rand_int(15);
+ int expected = r.d[0] % (1 << shift);
+ low = secp256k1_scalar_shr_int(&r, shift);
+ CHECK(expected == low);
+ }
+ }
+#endif
+
+ {
+ /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
+ if (!secp256k1_scalar_is_zero(&s)) {
+ secp256k1_scalar inv;
+#ifndef USE_NUM_NONE
+ secp256k1_num invnum;
+ secp256k1_num invnum2;
+#endif
+ secp256k1_scalar_inverse(&inv, &s);
+#ifndef USE_NUM_NONE
+ secp256k1_num_mod_inverse(&invnum, &snum, &order);
+ secp256k1_scalar_get_num(&invnum2, &inv);
+ CHECK(secp256k1_num_eq(&invnum, &invnum2));
+#endif
+ secp256k1_scalar_mul(&inv, &inv, &s);
+ /* Multiplying a scalar with its inverse must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+ secp256k1_scalar_inverse(&inv, &inv);
+ /* Inverting one must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+#ifndef USE_NUM_NONE
+ secp256k1_scalar_get_num(&invnum, &inv);
+ CHECK(secp256k1_num_is_one(&invnum));
+#endif
+ }
+ }
+
+ {
+ /* Test commutativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar b;
+ int i;
+ /* Test add_bit. */
+ int bit = secp256k1_rand_bits(8);
+ secp256k1_scalar_set_int(&b, 1);
+ CHECK(secp256k1_scalar_is_one(&b));
+ for (i = 0; i < bit; i++) {
+ secp256k1_scalar_add(&b, &b, &b);
+ }
+ r1 = s1;
+ r2 = s1;
+ if (!secp256k1_scalar_add(&r1, &r1, &b)) {
+ /* No overflow happened. */
+ secp256k1_scalar_cadd_bit(&r2, bit, 1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ /* cadd is a noop when flag is zero */
+ secp256k1_scalar_cadd_bit(&r2, bit, 0);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+ }
+
+ {
+ /* Test commutativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r1, &r1, &s);
+ secp256k1_scalar_add(&r2, &s2, &s);
+ secp256k1_scalar_add(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s2, &s);
+ secp256k1_scalar_mul(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test distributivity of mul over add. */
+ secp256k1_scalar r1, r2, t;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s1, &s);
+ secp256k1_scalar_mul(&t, &s2, &s);
+ secp256k1_scalar_add(&r2, &r2, &t);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test square. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_sqr(&r1, &s1);
+ secp256k1_scalar_mul(&r2, &s1, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test multiplicative identity. */
+ secp256k1_scalar r1, v1;
+ secp256k1_scalar_set_int(&v1,1);
+ secp256k1_scalar_mul(&r1, &s1, &v1);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test additive identity. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_add(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test zero product property. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_mul(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &v0));
+ }
+
+}
+
+void scalar_chacha_tests(void) {
+ unsigned char expected1[64] = {
+ 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
+ 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
+ 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
+ 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7,
+ 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
+ 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37,
+ 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c,
+ 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86
+ };
+ unsigned char expected2[64] = {
+ 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96,
+ 0xd7, 0x73, 0x6e, 0x7b, 0x20, 0x8e, 0x3c, 0x96,
+ 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60,
+ 0x4f, 0x45, 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41,
+ 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 0xd2,
+ 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c,
+ 0x53, 0xd7, 0x92, 0xb1, 0xc4, 0x3f, 0xea, 0x81,
+ 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63
+ };
+ unsigned char expected3[64] = {
+ 0x47, 0x4a, 0x4f, 0x35, 0x4f, 0xee, 0x93, 0x59,
+ 0xbb, 0x65, 0x81, 0xe5, 0xd9, 0x15, 0xa6, 0x01,
+ 0xb6, 0x8c, 0x68, 0x03, 0x38, 0xff, 0x65, 0xe6,
+ 0x56, 0x4a, 0x3e, 0x65, 0x59, 0xfc, 0x12, 0x3f,
+ 0xa9, 0xb2, 0xf9, 0x3e, 0x57, 0xc3, 0xa5, 0xcb,
+ 0xe0, 0x72, 0x74, 0x27, 0x88, 0x1c, 0x23, 0xdf,
+ 0xe2, 0xb6, 0xcc, 0xfb, 0x93, 0xed, 0xcb, 0x02,
+ 0xd7, 0x50, 0x52, 0x45, 0x84, 0x88, 0xbb, 0xea
+ };
+
+ secp256k1_scalar exp_r1, exp_r2;
+ secp256k1_scalar r1, r2;
+ unsigned char seed1[32] = { 0 };
+
+ secp256k1_scalar_chacha20(&r1, &r2, seed1, 0);
+ secp256k1_scalar_set_b32(&exp_r1, &expected1[0], NULL);
+ secp256k1_scalar_set_b32(&exp_r2, &expected1[32], NULL);
+ CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+ CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+ seed1[31] = 1;
+ secp256k1_scalar_chacha20(&r1, &r2, seed1, 0);
+ secp256k1_scalar_set_b32(&exp_r1, &expected2[0], NULL);
+ secp256k1_scalar_set_b32(&exp_r2, &expected2[32], NULL);
+ CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+ CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+ secp256k1_scalar_chacha20(&r1, &r2, seed1, 100);
+ secp256k1_scalar_set_b32(&exp_r1, &expected3[0], NULL);
+ secp256k1_scalar_set_b32(&exp_r2, &expected3[32], NULL);
+ CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+ CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+}
+
+void run_scalar_tests(void) {
+ int i;
+ for (i = 0; i < 128 * count; i++) {
+ scalar_test();
+ }
+
+ scalar_chacha_tests();
+
+ {
+ /* (-1)+1 should be zero. */
+ secp256k1_scalar s, o;
+ secp256k1_scalar_set_int(&s, 1);
+ CHECK(secp256k1_scalar_is_one(&s));
+ secp256k1_scalar_negate(&o, &s);
+ secp256k1_scalar_add(&o, &o, &s);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ secp256k1_scalar_negate(&o, &o);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* A scalar with value of the curve order should be 0. */
+ secp256k1_num order;
+ secp256k1_scalar zero;
+ unsigned char bin[32];
+ int overflow = 0;
+ secp256k1_scalar_order_get_num(&order);
+ secp256k1_num_get_bin(bin, 32, &order);
+ secp256k1_scalar_set_b32(&zero, bin, &overflow);
+ CHECK(overflow == 1);
+ CHECK(secp256k1_scalar_is_zero(&zero));
+ }
+#endif
+
+ {
+ /* Does check_overflow check catch all ones? */
+ static const secp256k1_scalar overflowed = SECP256K1_SCALAR_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL
+ );
+ CHECK(secp256k1_scalar_check_overflow(&overflowed));
+ }
+
+ {
+ /* Static test vectors.
+ * These were reduced from ~10^12 random vectors based on comparison-decision
+ * and edge-case coverage on 32-bit and 64-bit implementations.
+ * The responses were generated with Sage 5.9.
+ */
+ secp256k1_scalar x;
+ secp256k1_scalar y;
+ secp256k1_scalar z;
+ secp256k1_scalar zz;
+ secp256k1_scalar one;
+ secp256k1_scalar r1;
+ secp256k1_scalar r2;
+#if defined(USE_SCALAR_INV_NUM)
+ secp256k1_scalar zzv;
+#endif
+ int overflow;
+ unsigned char chal[33][2][32] = {
+ {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0xc0, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff}},
+ {{0xef, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x80, 0xff}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0x3f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x00},
+ {0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x1e, 0xf8, 0xff, 0xff, 0xff, 0xfd, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f,
+ 0x00, 0x00, 0x00, 0xf8, 0xff, 0x03, 0x00, 0xe0,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff,
+ 0xf3, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00,
+ 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0x80, 0xff, 0xff, 0x3f,
+ 0x00, 0xfe, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0x00, 0x0f, 0xfc, 0x9f,
+ 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0x0f, 0xfc, 0xff, 0x7f, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0xf8, 0xff, 0x0f, 0xc0, 0xff, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x07, 0x80, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0x00,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf0},
+ {0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0x00, 0xf8, 0xff, 0x03, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0xc0, 0xff, 0x0f, 0xfc, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff,
+ 0xff, 0x01, 0x00, 0x00, 0x00, 0x3f, 0x00, 0xc0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0x8f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x7f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x80, 0xff, 0x7f},
+ {0xff, 0xcf, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0xc0, 0xff, 0xcf, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xff, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0x01, 0xfc, 0xff, 0x01, 0x00, 0xfe, 0xff},
+ {0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7f, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0xff, 0x01, 0x00, 0xf0, 0xff, 0xff,
+ 0xe0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xfc, 0xff, 0xff, 0x3f, 0xf0, 0xff, 0xff, 0x3f,
+ 0x00, 0x00, 0xf8, 0x07, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0f, 0x7e, 0x00, 0x00}},
+ {{0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x1f, 0x00, 0x00, 0xfe, 0x07, 0x00},
+ {0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xfb, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60}},
+ {{0xff, 0x01, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00,
+ 0x80, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0x1f, 0x00, 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00}},
+ {{0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc0, 0xff, 0xff, 0xcf, 0xff, 0x1f, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00},
+ {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0x7f, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x80,
+ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0x7f, 0xf8, 0xff, 0xff, 0x1f, 0x00, 0xfe}},
+ {{0xff, 0xff, 0xff, 0x3f, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0x03, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0x80, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xc0,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff}},
+ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7e, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x07, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0x01, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0x3f, 0x00, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x3f, 0x00, 0x00, 0xc0, 0xf1, 0x7f, 0x00}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00},
+ {0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x80, 0x1f,
+ 0x00, 0x00, 0xfc, 0xff, 0xff, 0x01, 0xff, 0xff}},
+ {{0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0x03, 0xe0, 0x01,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xfc, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0xff, 0xff, 0xf0, 0x07, 0x00, 0x3c, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x07, 0xe0, 0xff, 0x00, 0x00, 0x00}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0x7f, 0xfe, 0xff, 0x1f,
+ 0x00, 0xfe, 0xff, 0x03, 0x00, 0x00, 0xfe, 0xff}},
+ {{0xff, 0xff, 0x81, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x83,
+ 0xff, 0xff, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xf0},
+ {0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00,
+ 0xf8, 0x07, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}},
+ {{0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb,
+ 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03},
+ {0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb,
+ 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}}
+ };
+ unsigned char res[33][2][32] = {
+ {{0x0c, 0x3b, 0x0a, 0xca, 0x8d, 0x1a, 0x2f, 0xb9,
+ 0x8a, 0x7b, 0x53, 0x5a, 0x1f, 0xc5, 0x22, 0xa1,
+ 0x07, 0x2a, 0x48, 0xea, 0x02, 0xeb, 0xb3, 0xd6,
+ 0x20, 0x1e, 0x86, 0xd0, 0x95, 0xf6, 0x92, 0x35},
+ {0xdc, 0x90, 0x7a, 0x07, 0x2e, 0x1e, 0x44, 0x6d,
+ 0xf8, 0x15, 0x24, 0x5b, 0x5a, 0x96, 0x37, 0x9c,
+ 0x37, 0x7b, 0x0d, 0xac, 0x1b, 0x65, 0x58, 0x49,
+ 0x43, 0xb7, 0x31, 0xbb, 0xa7, 0xf4, 0x97, 0x15}},
+ {{0xf1, 0xf7, 0x3a, 0x50, 0xe6, 0x10, 0xba, 0x22,
+ 0x43, 0x4d, 0x1f, 0x1f, 0x7c, 0x27, 0xca, 0x9c,
+ 0xb8, 0xb6, 0xa0, 0xfc, 0xd8, 0xc0, 0x05, 0x2f,
+ 0xf7, 0x08, 0xe1, 0x76, 0xdd, 0xd0, 0x80, 0xc8},
+ {0xe3, 0x80, 0x80, 0xb8, 0xdb, 0xe3, 0xa9, 0x77,
+ 0x00, 0xb0, 0xf5, 0x2e, 0x27, 0xe2, 0x68, 0xc4,
+ 0x88, 0xe8, 0x04, 0xc1, 0x12, 0xbf, 0x78, 0x59,
+ 0xe6, 0xa9, 0x7c, 0xe1, 0x81, 0xdd, 0xb9, 0xd5}},
+ {{0x96, 0xe2, 0xee, 0x01, 0xa6, 0x80, 0x31, 0xef,
+ 0x5c, 0xd0, 0x19, 0xb4, 0x7d, 0x5f, 0x79, 0xab,
+ 0xa1, 0x97, 0xd3, 0x7e, 0x33, 0xbb, 0x86, 0x55,
+ 0x60, 0x20, 0x10, 0x0d, 0x94, 0x2d, 0x11, 0x7c},
+ {0xcc, 0xab, 0xe0, 0xe8, 0x98, 0x65, 0x12, 0x96,
+ 0x38, 0x5a, 0x1a, 0xf2, 0x85, 0x23, 0x59, 0x5f,
+ 0xf9, 0xf3, 0xc2, 0x81, 0x70, 0x92, 0x65, 0x12,
+ 0x9c, 0x65, 0x1e, 0x96, 0x00, 0xef, 0xe7, 0x63}},
+ {{0xac, 0x1e, 0x62, 0xc2, 0x59, 0xfc, 0x4e, 0x5c,
+ 0x83, 0xb0, 0xd0, 0x6f, 0xce, 0x19, 0xf6, 0xbf,
+ 0xa4, 0xb0, 0xe0, 0x53, 0x66, 0x1f, 0xbf, 0xc9,
+ 0x33, 0x47, 0x37, 0xa9, 0x3d, 0x5d, 0xb0, 0x48},
+ {0x86, 0xb9, 0x2a, 0x7f, 0x8e, 0xa8, 0x60, 0x42,
+ 0x26, 0x6d, 0x6e, 0x1c, 0xa2, 0xec, 0xe0, 0xe5,
+ 0x3e, 0x0a, 0x33, 0xbb, 0x61, 0x4c, 0x9f, 0x3c,
+ 0xd1, 0xdf, 0x49, 0x33, 0xcd, 0x72, 0x78, 0x18}},
+ {{0xf7, 0xd3, 0xcd, 0x49, 0x5c, 0x13, 0x22, 0xfb,
+ 0x2e, 0xb2, 0x2f, 0x27, 0xf5, 0x8a, 0x5d, 0x74,
+ 0xc1, 0x58, 0xc5, 0xc2, 0x2d, 0x9f, 0x52, 0xc6,
+ 0x63, 0x9f, 0xba, 0x05, 0x76, 0x45, 0x7a, 0x63},
+ {0x8a, 0xfa, 0x55, 0x4d, 0xdd, 0xa3, 0xb2, 0xc3,
+ 0x44, 0xfd, 0xec, 0x72, 0xde, 0xef, 0xc0, 0x99,
+ 0xf5, 0x9f, 0xe2, 0x52, 0xb4, 0x05, 0x32, 0x58,
+ 0x57, 0xc1, 0x8f, 0xea, 0xc3, 0x24, 0x5b, 0x94}},
+ {{0x05, 0x83, 0xee, 0xdd, 0x64, 0xf0, 0x14, 0x3b,
+ 0xa0, 0x14, 0x4a, 0x3a, 0x41, 0x82, 0x7c, 0xa7,
+ 0x2c, 0xaa, 0xb1, 0x76, 0xbb, 0x59, 0x64, 0x5f,
+ 0x52, 0xad, 0x25, 0x29, 0x9d, 0x8f, 0x0b, 0xb0},
+ {0x7e, 0xe3, 0x7c, 0xca, 0xcd, 0x4f, 0xb0, 0x6d,
+ 0x7a, 0xb2, 0x3e, 0xa0, 0x08, 0xb9, 0xa8, 0x2d,
+ 0xc2, 0xf4, 0x99, 0x66, 0xcc, 0xac, 0xd8, 0xb9,
+ 0x72, 0x2a, 0x4a, 0x3e, 0x0f, 0x7b, 0xbf, 0xf4}},
+ {{0x8c, 0x9c, 0x78, 0x2b, 0x39, 0x61, 0x7e, 0xf7,
+ 0x65, 0x37, 0x66, 0x09, 0x38, 0xb9, 0x6f, 0x70,
+ 0x78, 0x87, 0xff, 0xcf, 0x93, 0xca, 0x85, 0x06,
+ 0x44, 0x84, 0xa7, 0xfe, 0xd3, 0xa4, 0xe3, 0x7e},
+ {0xa2, 0x56, 0x49, 0x23, 0x54, 0xa5, 0x50, 0xe9,
+ 0x5f, 0xf0, 0x4d, 0xe7, 0xdc, 0x38, 0x32, 0x79,
+ 0x4f, 0x1c, 0xb7, 0xe4, 0xbb, 0xf8, 0xbb, 0x2e,
+ 0x40, 0x41, 0x4b, 0xcc, 0xe3, 0x1e, 0x16, 0x36}},
+ {{0x0c, 0x1e, 0xd7, 0x09, 0x25, 0x40, 0x97, 0xcb,
+ 0x5c, 0x46, 0xa8, 0xda, 0xef, 0x25, 0xd5, 0xe5,
+ 0x92, 0x4d, 0xcf, 0xa3, 0xc4, 0x5d, 0x35, 0x4a,
+ 0xe4, 0x61, 0x92, 0xf3, 0xbf, 0x0e, 0xcd, 0xbe},
+ {0xe4, 0xaf, 0x0a, 0xb3, 0x30, 0x8b, 0x9b, 0x48,
+ 0x49, 0x43, 0xc7, 0x64, 0x60, 0x4a, 0x2b, 0x9e,
+ 0x95, 0x5f, 0x56, 0xe8, 0x35, 0xdc, 0xeb, 0xdc,
+ 0xc7, 0xc4, 0xfe, 0x30, 0x40, 0xc7, 0xbf, 0xa4}},
+ {{0xd4, 0xa0, 0xf5, 0x81, 0x49, 0x6b, 0xb6, 0x8b,
+ 0x0a, 0x69, 0xf9, 0xfe, 0xa8, 0x32, 0xe5, 0xe0,
+ 0xa5, 0xcd, 0x02, 0x53, 0xf9, 0x2c, 0xe3, 0x53,
+ 0x83, 0x36, 0xc6, 0x02, 0xb5, 0xeb, 0x64, 0xb8},
+ {0x1d, 0x42, 0xb9, 0xf9, 0xe9, 0xe3, 0x93, 0x2c,
+ 0x4c, 0xee, 0x6c, 0x5a, 0x47, 0x9e, 0x62, 0x01,
+ 0x6b, 0x04, 0xfe, 0xa4, 0x30, 0x2b, 0x0d, 0x4f,
+ 0x71, 0x10, 0xd3, 0x55, 0xca, 0xf3, 0x5e, 0x80}},
+ {{0x77, 0x05, 0xf6, 0x0c, 0x15, 0x9b, 0x45, 0xe7,
+ 0xb9, 0x11, 0xb8, 0xf5, 0xd6, 0xda, 0x73, 0x0c,
+ 0xda, 0x92, 0xea, 0xd0, 0x9d, 0xd0, 0x18, 0x92,
+ 0xce, 0x9a, 0xaa, 0xee, 0x0f, 0xef, 0xde, 0x30},
+ {0xf1, 0xf1, 0xd6, 0x9b, 0x51, 0xd7, 0x77, 0x62,
+ 0x52, 0x10, 0xb8, 0x7a, 0x84, 0x9d, 0x15, 0x4e,
+ 0x07, 0xdc, 0x1e, 0x75, 0x0d, 0x0c, 0x3b, 0xdb,
+ 0x74, 0x58, 0x62, 0x02, 0x90, 0x54, 0x8b, 0x43}},
+ {{0xa6, 0xfe, 0x0b, 0x87, 0x80, 0x43, 0x67, 0x25,
+ 0x57, 0x5d, 0xec, 0x40, 0x50, 0x08, 0xd5, 0x5d,
+ 0x43, 0xd7, 0xe0, 0xaa, 0xe0, 0x13, 0xb6, 0xb0,
+ 0xc0, 0xd4, 0xe5, 0x0d, 0x45, 0x83, 0xd6, 0x13},
+ {0x40, 0x45, 0x0a, 0x92, 0x31, 0xea, 0x8c, 0x60,
+ 0x8c, 0x1f, 0xd8, 0x76, 0x45, 0xb9, 0x29, 0x00,
+ 0x26, 0x32, 0xd8, 0xa6, 0x96, 0x88, 0xe2, 0xc4,
+ 0x8b, 0xdb, 0x7f, 0x17, 0x87, 0xcc, 0xc8, 0xf2}},
+ {{0xc2, 0x56, 0xe2, 0xb6, 0x1a, 0x81, 0xe7, 0x31,
+ 0x63, 0x2e, 0xbb, 0x0d, 0x2f, 0x81, 0x67, 0xd4,
+ 0x22, 0xe2, 0x38, 0x02, 0x25, 0x97, 0xc7, 0x88,
+ 0x6e, 0xdf, 0xbe, 0x2a, 0xa5, 0x73, 0x63, 0xaa},
+ {0x50, 0x45, 0xe2, 0xc3, 0xbd, 0x89, 0xfc, 0x57,
+ 0xbd, 0x3c, 0xa3, 0x98, 0x7e, 0x7f, 0x36, 0x38,
+ 0x92, 0x39, 0x1f, 0x0f, 0x81, 0x1a, 0x06, 0x51,
+ 0x1f, 0x8d, 0x6a, 0xff, 0x47, 0x16, 0x06, 0x9c}},
+ {{0x33, 0x95, 0xa2, 0x6f, 0x27, 0x5f, 0x9c, 0x9c,
+ 0x64, 0x45, 0xcb, 0xd1, 0x3c, 0xee, 0x5e, 0x5f,
+ 0x48, 0xa6, 0xaf, 0xe3, 0x79, 0xcf, 0xb1, 0xe2,
+ 0xbf, 0x55, 0x0e, 0xa2, 0x3b, 0x62, 0xf0, 0xe4},
+ {0x14, 0xe8, 0x06, 0xe3, 0xbe, 0x7e, 0x67, 0x01,
+ 0xc5, 0x21, 0x67, 0xd8, 0x54, 0xb5, 0x7f, 0xa4,
+ 0xf9, 0x75, 0x70, 0x1c, 0xfd, 0x79, 0xdb, 0x86,
+ 0xad, 0x37, 0x85, 0x83, 0x56, 0x4e, 0xf0, 0xbf}},
+ {{0xbc, 0xa6, 0xe0, 0x56, 0x4e, 0xef, 0xfa, 0xf5,
+ 0x1d, 0x5d, 0x3f, 0x2a, 0x5b, 0x19, 0xab, 0x51,
+ 0xc5, 0x8b, 0xdd, 0x98, 0x28, 0x35, 0x2f, 0xc3,
+ 0x81, 0x4f, 0x5c, 0xe5, 0x70, 0xb9, 0xeb, 0x62},
+ {0xc4, 0x6d, 0x26, 0xb0, 0x17, 0x6b, 0xfe, 0x6c,
+ 0x12, 0xf8, 0xe7, 0xc1, 0xf5, 0x2f, 0xfa, 0x91,
+ 0x13, 0x27, 0xbd, 0x73, 0xcc, 0x33, 0x31, 0x1c,
+ 0x39, 0xe3, 0x27, 0x6a, 0x95, 0xcf, 0xc5, 0xfb}},
+ {{0x30, 0xb2, 0x99, 0x84, 0xf0, 0x18, 0x2a, 0x6e,
+ 0x1e, 0x27, 0xed, 0xa2, 0x29, 0x99, 0x41, 0x56,
+ 0xe8, 0xd4, 0x0d, 0xef, 0x99, 0x9c, 0xf3, 0x58,
+ 0x29, 0x55, 0x1a, 0xc0, 0x68, 0xd6, 0x74, 0xa4},
+ {0x07, 0x9c, 0xe7, 0xec, 0xf5, 0x36, 0x73, 0x41,
+ 0xa3, 0x1c, 0xe5, 0x93, 0x97, 0x6a, 0xfd, 0xf7,
+ 0x53, 0x18, 0xab, 0xaf, 0xeb, 0x85, 0xbd, 0x92,
+ 0x90, 0xab, 0x3c, 0xbf, 0x30, 0x82, 0xad, 0xf6}},
+ {{0xc6, 0x87, 0x8a, 0x2a, 0xea, 0xc0, 0xa9, 0xec,
+ 0x6d, 0xd3, 0xdc, 0x32, 0x23, 0xce, 0x62, 0x19,
+ 0xa4, 0x7e, 0xa8, 0xdd, 0x1c, 0x33, 0xae, 0xd3,
+ 0x4f, 0x62, 0x9f, 0x52, 0xe7, 0x65, 0x46, 0xf4},
+ {0x97, 0x51, 0x27, 0x67, 0x2d, 0xa2, 0x82, 0x87,
+ 0x98, 0xd3, 0xb6, 0x14, 0x7f, 0x51, 0xd3, 0x9a,
+ 0x0b, 0xd0, 0x76, 0x81, 0xb2, 0x4f, 0x58, 0x92,
+ 0xa4, 0x86, 0xa1, 0xa7, 0x09, 0x1d, 0xef, 0x9b}},
+ {{0xb3, 0x0f, 0x2b, 0x69, 0x0d, 0x06, 0x90, 0x64,
+ 0xbd, 0x43, 0x4c, 0x10, 0xe8, 0x98, 0x1c, 0xa3,
+ 0xe1, 0x68, 0xe9, 0x79, 0x6c, 0x29, 0x51, 0x3f,
+ 0x41, 0xdc, 0xdf, 0x1f, 0xf3, 0x60, 0xbe, 0x33},
+ {0xa1, 0x5f, 0xf7, 0x1d, 0xb4, 0x3e, 0x9b, 0x3c,
+ 0xe7, 0xbd, 0xb6, 0x06, 0xd5, 0x60, 0x06, 0x6d,
+ 0x50, 0xd2, 0xf4, 0x1a, 0x31, 0x08, 0xf2, 0xea,
+ 0x8e, 0xef, 0x5f, 0x7d, 0xb6, 0xd0, 0xc0, 0x27}},
+ {{0x62, 0x9a, 0xd9, 0xbb, 0x38, 0x36, 0xce, 0xf7,
+ 0x5d, 0x2f, 0x13, 0xec, 0xc8, 0x2d, 0x02, 0x8a,
+ 0x2e, 0x72, 0xf0, 0xe5, 0x15, 0x9d, 0x72, 0xae,
+ 0xfc, 0xb3, 0x4f, 0x02, 0xea, 0xe1, 0x09, 0xfe},
+ {0x00, 0x00, 0x00, 0x00, 0xfa, 0x0a, 0x3d, 0xbc,
+ 0xad, 0x16, 0x0c, 0xb6, 0xe7, 0x7c, 0x8b, 0x39,
+ 0x9a, 0x43, 0xbb, 0xe3, 0xc2, 0x55, 0x15, 0x14,
+ 0x75, 0xac, 0x90, 0x9b, 0x7f, 0x9a, 0x92, 0x00}},
+ {{0x8b, 0xac, 0x70, 0x86, 0x29, 0x8f, 0x00, 0x23,
+ 0x7b, 0x45, 0x30, 0xaa, 0xb8, 0x4c, 0xc7, 0x8d,
+ 0x4e, 0x47, 0x85, 0xc6, 0x19, 0xe3, 0x96, 0xc2,
+ 0x9a, 0xa0, 0x12, 0xed, 0x6f, 0xd7, 0x76, 0x16},
+ {0x45, 0xaf, 0x7e, 0x33, 0xc7, 0x7f, 0x10, 0x6c,
+ 0x7c, 0x9f, 0x29, 0xc1, 0xa8, 0x7e, 0x15, 0x84,
+ 0xe7, 0x7d, 0xc0, 0x6d, 0xab, 0x71, 0x5d, 0xd0,
+ 0x6b, 0x9f, 0x97, 0xab, 0xcb, 0x51, 0x0c, 0x9f}},
+ {{0x9e, 0xc3, 0x92, 0xb4, 0x04, 0x9f, 0xc8, 0xbb,
+ 0xdd, 0x9e, 0xc6, 0x05, 0xfd, 0x65, 0xec, 0x94,
+ 0x7f, 0x2c, 0x16, 0xc4, 0x40, 0xac, 0x63, 0x7b,
+ 0x7d, 0xb8, 0x0c, 0xe4, 0x5b, 0xe3, 0xa7, 0x0e},
+ {0x43, 0xf4, 0x44, 0xe8, 0xcc, 0xc8, 0xd4, 0x54,
+ 0x33, 0x37, 0x50, 0xf2, 0x87, 0x42, 0x2e, 0x00,
+ 0x49, 0x60, 0x62, 0x02, 0xfd, 0x1a, 0x7c, 0xdb,
+ 0x29, 0x6c, 0x6d, 0x54, 0x53, 0x08, 0xd1, 0xc8}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92},
+ {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}},
+ {{0x28, 0x56, 0xac, 0x0e, 0x4f, 0x98, 0x09, 0xf0,
+ 0x49, 0xfa, 0x7f, 0x84, 0xac, 0x7e, 0x50, 0x5b,
+ 0x17, 0x43, 0x14, 0x89, 0x9c, 0x53, 0xa8, 0x94,
+ 0x30, 0xf2, 0x11, 0x4d, 0x92, 0x14, 0x27, 0xe8},
+ {0x39, 0x7a, 0x84, 0x56, 0x79, 0x9d, 0xec, 0x26,
+ 0x2c, 0x53, 0xc1, 0x94, 0xc9, 0x8d, 0x9e, 0x9d,
+ 0x32, 0x1f, 0xdd, 0x84, 0x04, 0xe8, 0xe2, 0x0a,
+ 0x6b, 0xbe, 0xbb, 0x42, 0x40, 0x67, 0x30, 0x6c}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
+ 0x40, 0x2d, 0xa1, 0x73, 0x2f, 0xc9, 0xbe, 0xbd},
+ {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0x1c, 0xc4, 0xf7, 0xda, 0x0f, 0x65, 0xca, 0x39,
+ 0x70, 0x52, 0x92, 0x8e, 0xc3, 0xc8, 0x15, 0xea,
+ 0x7f, 0x10, 0x9e, 0x77, 0x4b, 0x6e, 0x2d, 0xdf,
+ 0xe8, 0x30, 0x9d, 0xda, 0xe8, 0x9a, 0x65, 0xae},
+ {0x02, 0xb0, 0x16, 0xb1, 0x1d, 0xc8, 0x57, 0x7b,
+ 0xa2, 0x3a, 0xa2, 0xa3, 0x38, 0x5c, 0x8f, 0xeb,
+ 0x66, 0x37, 0x91, 0xa8, 0x5f, 0xef, 0x04, 0xf6,
+ 0x59, 0x75, 0xe1, 0xee, 0x92, 0xf6, 0x0e, 0x30}},
+ {{0x8d, 0x76, 0x14, 0xa4, 0x14, 0x06, 0x9f, 0x9a,
+ 0xdf, 0x4a, 0x85, 0xa7, 0x6b, 0xbf, 0x29, 0x6f,
+ 0xbc, 0x34, 0x87, 0x5d, 0xeb, 0xbb, 0x2e, 0xa9,
+ 0xc9, 0x1f, 0x58, 0xd6, 0x9a, 0x82, 0xa0, 0x56},
+ {0xd4, 0xb9, 0xdb, 0x88, 0x1d, 0x04, 0xe9, 0x93,
+ 0x8d, 0x3f, 0x20, 0xd5, 0x86, 0xa8, 0x83, 0x07,
+ 0xdb, 0x09, 0xd8, 0x22, 0x1f, 0x7f, 0xf1, 0x71,
+ 0xc8, 0xe7, 0x5d, 0x47, 0xaf, 0x8b, 0x72, 0xe9}},
+ {{0x83, 0xb9, 0x39, 0xb2, 0xa4, 0xdf, 0x46, 0x87,
+ 0xc2, 0xb8, 0xf1, 0xe6, 0x4c, 0xd1, 0xe2, 0xa9,
+ 0xe4, 0x70, 0x30, 0x34, 0xbc, 0x52, 0x7c, 0x55,
+ 0xa6, 0xec, 0x80, 0xa4, 0xe5, 0xd2, 0xdc, 0x73},
+ {0x08, 0xf1, 0x03, 0xcf, 0x16, 0x73, 0xe8, 0x7d,
+ 0xb6, 0x7e, 0x9b, 0xc0, 0xb4, 0xc2, 0xa5, 0x86,
+ 0x02, 0x77, 0xd5, 0x27, 0x86, 0xa5, 0x15, 0xfb,
+ 0xae, 0x9b, 0x8c, 0xa9, 0xf9, 0xf8, 0xa8, 0x4a}},
+ {{0x8b, 0x00, 0x49, 0xdb, 0xfa, 0xf0, 0x1b, 0xa2,
+ 0xed, 0x8a, 0x9a, 0x7a, 0x36, 0x78, 0x4a, 0xc7,
+ 0xf7, 0xad, 0x39, 0xd0, 0x6c, 0x65, 0x7a, 0x41,
+ 0xce, 0xd6, 0xd6, 0x4c, 0x20, 0x21, 0x6b, 0xc7},
+ {0xc6, 0xca, 0x78, 0x1d, 0x32, 0x6c, 0x6c, 0x06,
+ 0x91, 0xf2, 0x1a, 0xe8, 0x43, 0x16, 0xea, 0x04,
+ 0x3c, 0x1f, 0x07, 0x85, 0xf7, 0x09, 0x22, 0x08,
+ 0xba, 0x13, 0xfd, 0x78, 0x1e, 0x3f, 0x6f, 0x62}},
+ {{0x25, 0x9b, 0x7c, 0xb0, 0xac, 0x72, 0x6f, 0xb2,
+ 0xe3, 0x53, 0x84, 0x7a, 0x1a, 0x9a, 0x98, 0x9b,
+ 0x44, 0xd3, 0x59, 0xd0, 0x8e, 0x57, 0x41, 0x40,
+ 0x78, 0xa7, 0x30, 0x2f, 0x4c, 0x9c, 0xb9, 0x68},
+ {0xb7, 0x75, 0x03, 0x63, 0x61, 0xc2, 0x48, 0x6e,
+ 0x12, 0x3d, 0xbf, 0x4b, 0x27, 0xdf, 0xb1, 0x7a,
+ 0xff, 0x4e, 0x31, 0x07, 0x83, 0xf4, 0x62, 0x5b,
+ 0x19, 0xa5, 0xac, 0xa0, 0x32, 0x58, 0x0d, 0xa7}},
+ {{0x43, 0x4f, 0x10, 0xa4, 0xca, 0xdb, 0x38, 0x67,
+ 0xfa, 0xae, 0x96, 0xb5, 0x6d, 0x97, 0xff, 0x1f,
+ 0xb6, 0x83, 0x43, 0xd3, 0xa0, 0x2d, 0x70, 0x7a,
+ 0x64, 0x05, 0x4c, 0xa7, 0xc1, 0xa5, 0x21, 0x51},
+ {0xe4, 0xf1, 0x23, 0x84, 0xe1, 0xb5, 0x9d, 0xf2,
+ 0xb8, 0x73, 0x8b, 0x45, 0x2b, 0x35, 0x46, 0x38,
+ 0x10, 0x2b, 0x50, 0xf8, 0x8b, 0x35, 0xcd, 0x34,
+ 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}},
+ {{0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34,
+ 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13,
+ 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46,
+ 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5},
+ {0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34,
+ 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13,
+ 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46,
+ 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}}
+ };
+ secp256k1_scalar_set_int(&one, 1);
+ for (i = 0; i < 33; i++) {
+ secp256k1_scalar_set_b32(&x, chal[i][0], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&y, chal[i][1], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&r1, res[i][0], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&r2, res[i][1], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_mul(&z, &x, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ CHECK(secp256k1_scalar_eq(&r1, &z));
+ if (!secp256k1_scalar_is_zero(&y)) {
+ secp256k1_scalar_inverse(&zz, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+#if defined(USE_SCALAR_INV_NUM)
+ secp256k1_scalar_inverse_var(&zzv, &y);
+ CHECK(secp256k1_scalar_eq(&zzv, &zz));
+#endif
+ secp256k1_scalar_mul(&z, &z, &zz);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ CHECK(secp256k1_scalar_eq(&x, &z));
+ secp256k1_scalar_mul(&zz, &zz, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+ CHECK(secp256k1_scalar_eq(&one, &zz));
+ }
+ secp256k1_scalar_mul(&z, &x, &x);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ secp256k1_scalar_sqr(&zz, &x);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+ CHECK(secp256k1_scalar_eq(&zz, &z));
+ CHECK(secp256k1_scalar_eq(&r2, &zz));
+ }
+ }
+}
+
+/***** FIELD TESTS *****/
+
+/* Draw a uniformly random field element into x via rejection sampling. */
+void random_fe(secp256k1_fe *x) {
+    unsigned char buf[32];
+    for (;;) {
+        secp256k1_rand256(buf);
+        if (secp256k1_fe_set_b32(x, buf)) {
+            return;
+        }
+    }
+}
+
+/* Like random_fe, but uses the test RNG (biased towards special values). */
+void random_fe_test(secp256k1_fe *x) {
+    unsigned char buf[32];
+    for (;;) {
+        secp256k1_rand256_test(buf);
+        if (secp256k1_fe_set_b32(x, buf)) {
+            return;
+        }
+    }
+}
+
+/* Draw a random nonzero, normalized field element into nz. Aborts via CHECK
+ * if 10 consecutive draws are zero (infinitesimal probability). */
+void random_fe_non_zero(secp256k1_fe *nz) {
+    int found = 0;
+    int attempt;
+    for (attempt = 0; attempt < 10; attempt++) {
+        random_fe(nz);
+        secp256k1_fe_normalize(nz);
+        if (!secp256k1_fe_is_zero(nz)) {
+            found = 1;
+            break;
+        }
+    }
+    CHECK(found);
+}
+
+/* Draw a random field element with no square root into ns. */
+void random_fe_non_square(secp256k1_fe *ns) {
+    secp256k1_fe root;
+    random_fe_non_zero(ns);
+    if (secp256k1_fe_sqrt(&root, ns)) {
+        /* ns turned out to be a square; use its negation instead. */
+        secp256k1_fe_negate(ns, ns, 1);
+    }
+}
+
+/* Return whether a and b represent the same field element (inputs untouched). */
+int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
+    secp256k1_fe lhs = *a;
+    secp256k1_fe rhs = *b;
+    secp256k1_fe_normalize_weak(&lhs);
+    secp256k1_fe_normalize_var(&rhs);
+    return secp256k1_fe_equal_var(&lhs, &rhs);
+}
+
+/* Return whether ai is the multiplicative inverse of a (a * ai == 1). */
+int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) {
+    secp256k1_fe prod;
+    secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+    secp256k1_fe_mul(&prod, a, ai);
+    return check_fe_equal(&prod, &one);
+}
+
+/* Check that the byte, storage and field-element representations of one
+ * fixed value all convert into each other consistently. */
+void run_field_convert(void) {
+    /* The same 256-bit value in all three encodings. */
+    static const unsigned char b32[32] = {
+        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+        0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+        0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
+        0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40
+    };
+    static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST(
+        0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+        0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+    );
+    static const secp256k1_fe fe = SECP256K1_FE_CONST(
+        0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+        0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+    );
+    secp256k1_fe fe2;
+    unsigned char b322[32];
+    secp256k1_fe_storage fes2;
+    /* Check conversions to fe. */
+    CHECK(secp256k1_fe_set_b32(&fe2, b32));
+    CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+    secp256k1_fe_from_storage(&fe2, &fes);
+    CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+    /* Check conversion from fe. */
+    secp256k1_fe_get_b32(b322, &fe);
+    CHECK(memcmp(b322, b32, 32) == 0);
+    secp256k1_fe_to_storage(&fes2, &fe);
+    CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0);
+}
+
+/* Bitwise comparison of two field elements, ignoring the VERIFY-only
+ * magnitude/normalized bookkeeping fields. */
+int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) {
+    secp256k1_fe bc = *b;
+#ifdef VERIFY
+    /* Copy a's bookkeeping into the temporary so only limbs are compared. */
+    bc.magnitude = a->magnitude;
+    bc.normalized = a->normalized;
+#endif
+    return memcmp(a, &bc, sizeof(secp256k1_fe));
+}
+
+/* Miscellaneous field-element tests: equality/comparison, conditional moves
+ * (including magnitude/normalization bookkeeping under VERIFY), storage
+ * round-trips, and consistency of add / mul_int / mul. */
+void run_field_misc(void) {
+    secp256k1_fe x;
+    secp256k1_fe y;
+    secp256k1_fe z;
+    secp256k1_fe q;
+    secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5);
+    int i, j;
+    for (i = 0; i < 5*count; i++) {
+        secp256k1_fe_storage xs, ys, zs;
+        random_fe(&x);
+        random_fe_non_zero(&y);
+        /* Test the fe equality and comparison operations. */
+        CHECK(secp256k1_fe_cmp_var(&x, &x) == 0);
+        CHECK(secp256k1_fe_equal_var(&x, &x));
+        z = x;
+        secp256k1_fe_add(&z,&y);
+        /* Test fe conditional move; z is not normalized here. */
+        q = x;
+        secp256k1_fe_cmov(&x, &z, 0);
+        /* Even a no-op cmov must degrade the bookkeeping to the worst case. */
+        VERIFY_CHECK(!x.normalized && x.magnitude == z.magnitude);
+        secp256k1_fe_cmov(&x, &x, 1);
+        CHECK(fe_memcmp(&x, &z) != 0);
+        CHECK(fe_memcmp(&x, &q) == 0);
+        secp256k1_fe_cmov(&q, &z, 1);
+        VERIFY_CHECK(!q.normalized && q.magnitude == z.magnitude);
+        CHECK(fe_memcmp(&q, &z) == 0);
+        secp256k1_fe_normalize_var(&x);
+        secp256k1_fe_normalize_var(&z);
+        CHECK(!secp256k1_fe_equal_var(&x, &z));
+        secp256k1_fe_normalize_var(&q);
+        secp256k1_fe_cmov(&q, &z, (i&1));
+        VERIFY_CHECK(q.normalized && q.magnitude == 1);
+        /* Grow z's magnitude step by step; cmov must track the maximum. */
+        for (j = 0; j < 6; j++) {
+            secp256k1_fe_negate(&z, &z, j+1);
+            secp256k1_fe_normalize_var(&q);
+            secp256k1_fe_cmov(&q, &z, (j&1));
+            VERIFY_CHECK(!q.normalized && q.magnitude == (j+2));
+        }
+        secp256k1_fe_normalize_var(&z);
+        /* Test storage conversion and conditional moves. */
+        secp256k1_fe_to_storage(&xs, &x);
+        secp256k1_fe_to_storage(&ys, &y);
+        secp256k1_fe_to_storage(&zs, &z);
+        secp256k1_fe_storage_cmov(&zs, &xs, 0);
+        secp256k1_fe_storage_cmov(&zs, &zs, 1);
+        CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0);
+        secp256k1_fe_storage_cmov(&ys, &xs, 1);
+        CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0);
+        secp256k1_fe_from_storage(&x, &xs);
+        secp256k1_fe_from_storage(&y, &ys);
+        secp256k1_fe_from_storage(&z, &zs);
+        /* Test that mul_int, mul, and add agree. */
+        secp256k1_fe_add(&y, &x);
+        secp256k1_fe_add(&y, &x);
+        z = x;
+        secp256k1_fe_mul_int(&z, 3);
+        CHECK(check_fe_equal(&y, &z));
+        secp256k1_fe_add(&y, &x);
+        secp256k1_fe_add(&z, &x);
+        CHECK(check_fe_equal(&z, &y));
+        z = x;
+        secp256k1_fe_mul_int(&z, 5);
+        secp256k1_fe_mul(&q, &x, &fe5);
+        CHECK(check_fe_equal(&z, &q));
+        secp256k1_fe_negate(&x, &x, 1);
+        secp256k1_fe_add(&z, &x);
+        secp256k1_fe_add(&q, &x);
+        CHECK(check_fe_equal(&y, &z));
+        CHECK(check_fe_equal(&q, &y));
+    }
+}
+
+/* Test the constant-time field inverse: a * a^-1 == 1 and (a^-1)^-1 == a. */
+void run_field_inv(void) {
+    secp256k1_fe a, ai, aii;
+    int iter;
+    for (iter = 0; iter < 10*count; iter++) {
+        random_fe_non_zero(&a);
+        secp256k1_fe_inv(&ai, &a);
+        CHECK(check_fe_inverse(&a, &ai));
+        secp256k1_fe_inv(&aii, &ai);
+        CHECK(check_fe_equal(&a, &aii));
+    }
+}
+
+/* Test the variable-time field inverse: a * a^-1 == 1 and (a^-1)^-1 == a. */
+void run_field_inv_var(void) {
+    secp256k1_fe a, ai, aii;
+    int iter;
+    for (iter = 0; iter < 10*count; iter++) {
+        random_fe_non_zero(&a);
+        secp256k1_fe_inv_var(&ai, &a);
+        CHECK(check_fe_inverse(&a, &ai));
+        secp256k1_fe_inv_var(&aii, &ai);
+        CHECK(check_fe_equal(&a, &aii));
+    }
+}
+
+/* Test batch field inversion for random lengths 1..15, plus the length-0 edge case. */
+void run_field_inv_all_var(void) {
+    secp256k1_fe x[16], xi[16], xii[16];
+    int iter;
+    /* Check it's safe to call for 0 elements */
+    secp256k1_fe_inv_all_var(xi, x, 0);
+    for (iter = 0; iter < count; iter++) {
+        size_t k;
+        size_t len = secp256k1_rand_int(15) + 1;
+        for (k = 0; k < len; k++) {
+            random_fe_non_zero(&x[k]);
+        }
+        secp256k1_fe_inv_all_var(xi, x, len);
+        for (k = 0; k < len; k++) {
+            CHECK(check_fe_inverse(&x[k], &xi[k]));
+        }
+        /* Inverting the inverses must give back the originals. */
+        secp256k1_fe_inv_all_var(xii, xi, len);
+        for (k = 0; k < len; k++) {
+            CHECK(check_fe_equal(&x[k], &xii[k]));
+        }
+    }
+}
+
+/* Exercise secp256k1_fe_sqr on -1 doubled 512 times (a sweep of bit patterns). */
+void run_sqr(void) {
+    secp256k1_fe x, s;
+    int step;
+
+    secp256k1_fe_set_int(&x, 1);
+    secp256k1_fe_negate(&x, &x, 1);
+    for (step = 1; step <= 512; ++step) {
+        secp256k1_fe_mul_int(&x, 2);
+        secp256k1_fe_normalize(&x);
+        secp256k1_fe_sqr(&s, &x);
+    }
+}
+
+/* Check secp256k1_fe_sqrt on a: if k is NULL, a must not be a square;
+ * otherwise the computed root must equal +/- k. */
+void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) {
+    secp256k1_fe r1, r2;
+    int res = secp256k1_fe_sqrt(&r1, a);
+    CHECK((res == 0) == (k == NULL));
+    if (k == NULL) {
+        return;
+    }
+    /* One of r1 + k and -r1 + k must be zero. */
+    secp256k1_fe_negate(&r2, &r1, 1);
+    secp256k1_fe_add(&r1, k);
+    secp256k1_fe_add(&r2, k);
+    secp256k1_fe_normalize(&r1);
+    secp256k1_fe_normalize(&r2);
+    CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2));
+}
+
+/* Square-root tests: zero, small squares and their (non-square) negatives,
+ * and large random values scaled by a known non-square. */
+void run_sqrt(void) {
+    secp256k1_fe ns, x, s, t;
+    int i;
+
+    /* Check sqrt(0) is 0 */
+    secp256k1_fe_set_int(&x, 0);
+    secp256k1_fe_sqr(&s, &x);
+    test_sqrt(&s, &x);
+
+    /* Check sqrt of small squares (and their negatives) */
+    for (i = 1; i <= 100; i++) {
+        secp256k1_fe_set_int(&x, i);
+        secp256k1_fe_sqr(&s, &x);
+        test_sqrt(&s, &x);
+        secp256k1_fe_negate(&t, &s, 1);
+        test_sqrt(&t, NULL);
+    }
+
+    /* Consistency checks for large random values */
+    for (i = 0; i < 10; i++) {
+        int j;
+        random_fe_non_square(&ns);
+        for (j = 0; j < count; j++) {
+            random_fe(&x);
+            secp256k1_fe_sqr(&s, &x);
+            test_sqrt(&s, &x);
+            secp256k1_fe_negate(&t, &s, 1);
+            test_sqrt(&t, NULL);
+            /* A square times a non-square is a non-square. */
+            secp256k1_fe_mul(&t, &s, &ns);
+            test_sqrt(&t, NULL);
+        }
+    }
+}
+
+/***** GROUP TESTS *****/
+
+/* CHECK that two affine group elements are equal. */
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+    CHECK(a->infinity == b->infinity);
+    if (!a->infinity) {
+        CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+        CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+    }
+}
+
+/* This compares jacobian points including their Z, not just their geometric meaning. */
+int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) {
+ secp256k1_gej a2;
+ secp256k1_gej b2;
+ int ret = 1;
+ ret &= a->infinity == b->infinity;
+ if (ret && !a->infinity) {
+ a2 = *a;
+ b2 = *b;
+ secp256k1_fe_normalize(&a2.x);
+ secp256k1_fe_normalize(&a2.y);
+ secp256k1_fe_normalize(&a2.z);
+ secp256k1_fe_normalize(&b2.x);
+ secp256k1_fe_normalize(&b2.y);
+ secp256k1_fe_normalize(&b2.z);
+ ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0;
+ }
+ return ret;
+}
+
+/* CHECK that affine a equals jacobian b without computing a field inverse:
+ * verify a.x * b.z^2 == b.x and a.y * b.z^3 == b.y. */
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+    secp256k1_fe z2, lhs_x, lhs_y, rhs_x, rhs_y;
+    CHECK(a->infinity == b->infinity);
+    if (a->infinity) {
+        return;
+    }
+    secp256k1_fe_sqr(&z2, &b->z);
+    secp256k1_fe_mul(&lhs_x, &a->x, &z2);
+    secp256k1_fe_mul(&lhs_y, &a->y, &z2);
+    secp256k1_fe_mul(&lhs_y, &lhs_y, &b->z);
+    rhs_x = b->x;
+    secp256k1_fe_normalize_weak(&rhs_x);
+    rhs_y = b->y;
+    secp256k1_fe_normalize_weak(&rhs_y);
+    CHECK(secp256k1_fe_equal_var(&lhs_x, &rhs_x));
+    CHECK(secp256k1_fe_equal_var(&lhs_y, &rhs_y));
+}
+
+/* Exhaustive pairwise group-addition test: builds a table of points (see the
+ * layout comment below), adds every pair with every applicable addition
+ * routine, and cross-checks results, Z-ratio outputs, doubling, negation and
+ * batch jacobian->affine conversion. */
+void test_ge(void) {
+    int i, i1;
+#ifdef USE_ENDOMORPHISM
+    int runs = 6;
+#else
+    int runs = 4;
+#endif
+    /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
+     * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
+     * All magnitudes are randomized.
+     * All 17*17 combinations of points are added to each other, using all applicable methods.
+     *
+     * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+     */
+    secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs));
+    secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs));
+    secp256k1_fe *zinv = (secp256k1_fe *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs));
+    secp256k1_fe zf;
+    secp256k1_fe zfi2, zfi3;
+
+    /* Slot 0 holds the point at infinity. */
+    secp256k1_gej_set_infinity(&gej[0]);
+    secp256k1_ge_clear(&ge[0]);
+    secp256k1_ge_set_gej_var(&ge[0], &gej[0]);
+    for (i = 0; i < runs; i++) {
+        int j;
+        secp256k1_ge g;
+        random_group_element_test(&g);
+#ifdef USE_ENDOMORPHISM
+        if (i >= runs - 2) {
+            secp256k1_ge_mul_lambda(&g, &ge[1]);
+        }
+        if (i >= runs - 1) {
+            secp256k1_ge_mul_lambda(&g, &g);
+        }
+#endif
+        ge[1 + 4 * i] = g;
+        ge[2 + 4 * i] = g;
+        secp256k1_ge_neg(&ge[3 + 4 * i], &g);
+        secp256k1_ge_neg(&ge[4 + 4 * i], &g);
+        secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]);
+        random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]);
+        secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]);
+        random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]);
+        for (j = 0; j < 4; j++) {
+            random_field_element_magnitude(&ge[1 + j + 4 * i].x);
+            random_field_element_magnitude(&ge[1 + j + 4 * i].y);
+            random_field_element_magnitude(&gej[1 + j + 4 * i].x);
+            random_field_element_magnitude(&gej[1 + j + 4 * i].y);
+            random_field_element_magnitude(&gej[1 + j + 4 * i].z);
+        }
+    }
+
+    /* Compute z inverses. */
+    {
+        secp256k1_fe *zs = checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs));
+        for (i = 0; i < 4 * runs + 1; i++) {
+            if (i == 0) {
+                /* The point at infinity does not have a meaningful z inverse. Any should do. */
+                do {
+                    random_field_element_test(&zs[i]);
+                } while(secp256k1_fe_is_zero(&zs[i]));
+            } else {
+                zs[i] = gej[i].z;
+            }
+        }
+        secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1);
+        free(zs);
+    }
+
+    /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */
+    do {
+        random_field_element_test(&zf);
+    } while(secp256k1_fe_is_zero(&zf));
+    random_field_element_magnitude(&zf);
+    secp256k1_fe_inv_var(&zfi3, &zf);
+    secp256k1_fe_sqr(&zfi2, &zfi3);
+    secp256k1_fe_mul(&zfi3, &zfi3, &zfi2);
+
+    for (i1 = 0; i1 < 1 + 4 * runs; i1++) {
+        int i2;
+        for (i2 = 0; i2 < 1 + 4 * runs; i2++) {
+            /* Compute reference result using gej + gej (var). */
+            secp256k1_gej refj, resj;
+            secp256k1_ge ref;
+            secp256k1_fe zr;
+            secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+            /* Check Z ratio: zr * gej[i1].z must equal the result's z. */
+            if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) {
+                secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+                CHECK(secp256k1_fe_equal_var(&zrz, &refj.z));
+            }
+            secp256k1_ge_set_gej_var(&ref, &refj);
+
+            /* Test gej + ge with Z ratio result (var). */
+            secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+            ge_equals_gej(&ref, &resj);
+            if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) {
+                secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+                CHECK(secp256k1_fe_equal_var(&zrz, &resj.z));
+            }
+
+            /* Test gej + ge (var, with additional Z factor). */
+            {
+                secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */
+                secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2);
+                secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3);
+                random_field_element_magnitude(&ge2_zfi.x);
+                random_field_element_magnitude(&ge2_zfi.y);
+                secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf);
+                ge_equals_gej(&ref, &resj);
+            }
+
+            /* Test gej + ge (const). */
+            if (i2 != 0) {
+                /* secp256k1_gej_add_ge does not support its second argument being infinity. */
+                secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]);
+                ge_equals_gej(&ref, &resj);
+            }
+
+            /* Test doubling (var); applies when i1 and i2 index the same point. */
+            if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) {
+                secp256k1_fe zr2;
+                /* Normal doubling with Z ratio result. */
+                secp256k1_gej_double_var(&resj, &gej[i1], &zr2);
+                ge_equals_gej(&ref, &resj);
+                /* Check Z ratio. */
+                secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z);
+                CHECK(secp256k1_fe_equal_var(&zr2, &resj.z));
+                /* Normal doubling. */
+                secp256k1_gej_double_var(&resj, &gej[i2], NULL);
+                ge_equals_gej(&ref, &resj);
+            }
+
+            /* Test adding opposites; applies when i1 and i2 index negations of the same point. */
+            if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) {
+                CHECK(secp256k1_ge_is_infinity(&ref));
+            }
+
+            /* Test adding infinity. */
+            if (i1 == 0) {
+                CHECK(secp256k1_ge_is_infinity(&ge[i1]));
+                CHECK(secp256k1_gej_is_infinity(&gej[i1]));
+                ge_equals_gej(&ref, &gej[i2]);
+            }
+            if (i2 == 0) {
+                CHECK(secp256k1_ge_is_infinity(&ge[i2]));
+                CHECK(secp256k1_gej_is_infinity(&gej[i2]));
+                ge_equals_gej(&ref, &gej[i1]);
+            }
+        }
+    }
+
+    /* Test adding all points together in random order equals infinity. */
+    {
+        secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY;
+        secp256k1_gej *gej_shuffled = (secp256k1_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_gej));
+        for (i = 0; i < 4 * runs + 1; i++) {
+            gej_shuffled[i] = gej[i];
+        }
+        /* Fisher-Yates shuffle. */
+        for (i = 0; i < 4 * runs + 1; i++) {
+            int swap = i + secp256k1_rand_int(4 * runs + 1 - i);
+            if (swap != i) {
+                secp256k1_gej t = gej_shuffled[i];
+                gej_shuffled[i] = gej_shuffled[swap];
+                gej_shuffled[swap] = t;
+            }
+        }
+        for (i = 0; i < 4 * runs + 1; i++) {
+            secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL);
+        }
+        CHECK(secp256k1_gej_is_infinity(&sum));
+        free(gej_shuffled);
+    }
+
+    /* Test batch gej -> ge conversion with and without known z ratios. */
+    {
+        secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe));
+        secp256k1_ge *ge_set_table = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge));
+        secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge));
+        for (i = 0; i < 4 * runs + 1; i++) {
+            /* Compute gej[i + 1].z / gej[i].z (with gej[n].z taken to be 1). */
+            if (i < 4 * runs) {
+                secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z);
+            }
+        }
+        secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1);
+        secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback);
+        for (i = 0; i < 4 * runs + 1; i++) {
+            secp256k1_fe s;
+            /* Rescaling must not change the affine meaning. */
+            random_fe_non_zero(&s);
+            secp256k1_gej_rescale(&gej[i], &s);
+            ge_equals_gej(&ge_set_table[i], &gej[i]);
+            ge_equals_gej(&ge_set_all[i], &gej[i]);
+        }
+        free(ge_set_table);
+        free(ge_set_all);
+        free(zr);
+    }
+
+    free(ge);
+    free(gej);
+    free(zinv);
+}
+
+void test_add_neg_y_diff_x(void) {
+    /* The point of this test is to check that we can add two points
+     * whose y-coordinates are negatives of each other but whose x
+     * coordinates differ. If the x-coordinates were the same, these
+     * points would be negatives of each other and their sum is
+     * infinity. This is cool because it "covers up" any degeneracy
+     * in the addition algorithm that would cause the xy coordinates
+     * of the sum to be wrong (since infinity has no xy coordinates).
+     * HOWEVER, if the x-coordinates are different, infinity is the
+     * wrong answer, and such degeneracies are exposed. This is the
+     * root of https://github.com/bitcoin-core/secp256k1/issues/257
+     * which this test is a regression test for.
+     *
+     * These points were generated in sage as
+     * # secp256k1 params
+     * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
+     * C = EllipticCurve ([F (0), F (7)])
+     * G = C.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
+     * N = FiniteField(G.order())
+     *
+     * # endomorphism values (lambda is 1^{1/3} in N, beta is 1^{1/3} in F)
+     * x = polygen(N)
+     * lam  = (1 - x^3).roots()[1][0]
+     *
+     * # random "bad pair"
+     * P = C.random_element()
+     * Q = -int(lam) * P
+     * print "    P: %x %x" % P.xy()
+     * print "    Q: %x %x" % Q.xy()
+     * print "P + Q: %x %x" % (P + Q).xy()
+     */
+    secp256k1_gej aj = SECP256K1_GEJ_CONST(
+        0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30,
+        0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb,
+        0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8,
+        0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d
+    );
+    secp256k1_gej bj = SECP256K1_GEJ_CONST(
+        0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86,
+        0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7,
+        0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57,
+        0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2
+    );
+    secp256k1_gej sumj = SECP256K1_GEJ_CONST(
+        0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027,
+        0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a,
+        0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08,
+        0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe
+    );
+    secp256k1_ge b;
+    secp256k1_gej resj;
+    secp256k1_ge res;
+    secp256k1_ge_set_gej(&b, &bj);
+
+    /* All three addition routines must produce the known sum. */
+    secp256k1_gej_add_var(&resj, &aj, &bj, NULL);
+    secp256k1_ge_set_gej(&res, &resj);
+    ge_equals_gej(&res, &sumj);
+
+    secp256k1_gej_add_ge(&resj, &aj, &b);
+    secp256k1_ge_set_gej(&res, &resj);
+    ge_equals_gej(&res, &sumj);
+
+    secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL);
+    secp256k1_ge_set_gej(&res, &resj);
+    ge_equals_gej(&res, &sumj);
+}
+
+/* Run the pairwise group tests repeatedly, then the issue-257 regression test. */
+void run_ge(void) {
+    int iter;
+    for (iter = 0; iter < count * 32; iter++) {
+        test_ge();
+    }
+    test_add_neg_y_diff_x();
+}
+
+/* Check that combining i public keys equals the key of the sum of their
+ * secret scalars, for i = 1..6. */
+void test_ec_combine(void) {
+    secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+    secp256k1_pubkey data[6];
+    const secp256k1_pubkey* d[6];
+    secp256k1_pubkey sd;
+    secp256k1_pubkey sd2;
+    secp256k1_gej Qj;
+    secp256k1_ge Q;
+    int i;
+    for (i = 1; i <= 6; i++) {
+        secp256k1_scalar s;
+        random_scalar_order_test(&s);
+        secp256k1_scalar_add(&sum, &sum, &s);
+        /* data[i-1] = s*G; the i-th key to combine. */
+        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s);
+        secp256k1_ge_set_gej(&Q, &Qj);
+        secp256k1_pubkey_save(&data[i - 1], &Q);
+        d[i - 1] = &data[i - 1];
+        /* sd = (sum of scalars)*G; the expected combination. */
+        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum);
+        secp256k1_ge_set_gej(&Q, &Qj);
+        secp256k1_pubkey_save(&sd, &Q);
+        CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1);
+        CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0);
+    }
+}
+
+/* Repeat the pubkey-combination test. */
+void run_ec_combine(void) {
+    int iter;
+    for (iter = 0; iter < count * 8; iter++) {
+        test_ec_combine();
+    }
+}
+
+/* Test the three x-coordinate decompression variants on x, and the
+ * quadratic-residue test on the resulting jacobian point. */
+void test_group_decompress(const secp256k1_fe* x) {
+    /* The input itself, normalized. */
+    secp256k1_fe fex = *x;
+    secp256k1_fe fez;
+    /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */
+    secp256k1_ge ge_quad, ge_even, ge_odd;
+    secp256k1_gej gej_quad;
+    /* Return values of the above calls. */
+    int res_quad, res_even, res_odd;
+
+    secp256k1_fe_normalize_var(&fex);
+
+    res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex);
+    res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0);
+    res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1);
+
+    /* All three must agree on whether x is on the curve. */
+    CHECK(res_quad == res_even);
+    CHECK(res_quad == res_odd);
+
+    if (res_quad) {
+        secp256k1_fe_normalize_var(&ge_quad.x);
+        secp256k1_fe_normalize_var(&ge_odd.x);
+        secp256k1_fe_normalize_var(&ge_even.x);
+        secp256k1_fe_normalize_var(&ge_quad.y);
+        secp256k1_fe_normalize_var(&ge_odd.y);
+        secp256k1_fe_normalize_var(&ge_even.y);
+
+        /* No infinity allowed. */
+        CHECK(!ge_quad.infinity);
+        CHECK(!ge_even.infinity);
+        CHECK(!ge_odd.infinity);
+
+        /* Check that the x coordinates check out. */
+        CHECK(secp256k1_fe_equal_var(&ge_quad.x, x));
+        CHECK(secp256k1_fe_equal_var(&ge_even.x, x));
+        CHECK(secp256k1_fe_equal_var(&ge_odd.x, x));
+
+        /* Check that the Y coordinate result in ge_quad is a square. */
+        CHECK(secp256k1_fe_is_quad_var(&ge_quad.y));
+
+        /* Check odd/even Y in ge_odd, ge_even. */
+        CHECK(secp256k1_fe_is_odd(&ge_odd.y));
+        CHECK(!secp256k1_fe_is_odd(&ge_even.y));
+
+        /* Check secp256k1_gej_has_quad_y_var: rescaling must not change the
+         * answer, negation must flip it. */
+        secp256k1_gej_set_ge(&gej_quad, &ge_quad);
+        CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+        do {
+            random_fe_test(&fez);
+        } while (secp256k1_fe_is_zero(&fez));
+        secp256k1_gej_rescale(&gej_quad, &fez);
+        CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+        secp256k1_gej_neg(&gej_quad, &gej_quad);
+        CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+        do {
+            random_fe_test(&fez);
+        } while (secp256k1_fe_is_zero(&fez));
+        secp256k1_gej_rescale(&gej_quad, &fez);
+        CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+        secp256k1_gej_neg(&gej_quad, &gej_quad);
+        CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+    }
+}
+
+/* Run the decompression test on random (test-distribution) x coordinates. */
+void run_group_decompress(void) {
+    int iter;
+    for (iter = 0; iter < count * 4; iter++) {
+        secp256k1_fe x;
+        random_fe_test(&x);
+        test_group_decompress(&x);
+    }
+}
+
+/***** ECMULT TESTS *****/
+
+/* Iterate X = xn*X + gn*G many times while tracking the accumulated
+ * coefficients (ae, ge) of A and G; verify a precomputed intermediate value
+ * and that recomputing X directly from (ae, ge) matches. */
+void run_ecmult_chain(void) {
+    /* random starting point A (on the curve) */
+    secp256k1_gej a = SECP256K1_GEJ_CONST(
+        0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3,
+        0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004,
+        0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f,
+        0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f
+    );
+    /* two random initial factors xn and gn */
+    secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+        0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c,
+        0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407
+    );
+    secp256k1_scalar gn = SECP256K1_SCALAR_CONST(
+        0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9,
+        0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de
+    );
+    /* two small multipliers to be applied to xn and gn in every iteration: */
+    static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337);
+    static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113);
+    /* accumulators with the resulting coefficients to A and G */
+    secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+    secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+    /* actual points */
+    secp256k1_gej x;
+    secp256k1_gej x2;
+    int i;
+
+    /* the point being computed */
+    x = a;
+    for (i = 0; i < 200*count; i++) {
+        /* in each iteration, compute X = xn*X + gn*G; */
+        secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn);
+        /* also compute ae and ge: the actual accumulated factors for A and G */
+        /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */
+        secp256k1_scalar_mul(&ae, &ae, &xn);
+        secp256k1_scalar_mul(&ge, &ge, &xn);
+        secp256k1_scalar_add(&ge, &ge, &gn);
+        /* modify xn and gn */
+        secp256k1_scalar_mul(&xn, &xn, &xf);
+        secp256k1_scalar_mul(&gn, &gn, &gf);
+
+        /* verify (only reached when the default count of 100 gives 20000 iterations) */
+        if (i == 19999) {
+            /* expected result after 19999 iterations */
+            secp256k1_gej rp = SECP256K1_GEJ_CONST(
+                0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE,
+                0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830,
+                0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D,
+                0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88
+            );
+
+            /* x - rp must be infinity, i.e. x == rp. */
+            secp256k1_gej_neg(&rp, &rp);
+            secp256k1_gej_add_var(&rp, &rp, &x, NULL);
+            CHECK(secp256k1_gej_is_infinity(&rp));
+        }
+    }
+    /* redo the computation, but directly with the resulting ae and ge coefficients: */
+    secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge);
+    secp256k1_gej_neg(&x2, &x2);
+    secp256k1_gej_add_var(&x2, &x2, &x, NULL);
+    CHECK(secp256k1_gej_is_infinity(&x2));
+}
+
+void test_point_times_order(const secp256k1_gej *point) {
+    /* X * (point + G) + (order-X) * (point + G) = 0 */
+    secp256k1_scalar x;
+    secp256k1_scalar nx;
+    secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+    secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+    secp256k1_gej res1, res2;
+    secp256k1_ge res3;
+    unsigned char pub[65];
+    size_t psize = 65;
+    random_scalar_order_test(&x);
+    secp256k1_scalar_negate(&nx, &x);
+    secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */
+    secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
+    secp256k1_gej_add_var(&res1, &res1, &res2, NULL);
+    CHECK(secp256k1_gej_is_infinity(&res1));
+    CHECK(secp256k1_gej_is_valid_var(&res1) == 0);
+    secp256k1_ge_set_gej(&res3, &res1);
+    CHECK(secp256k1_ge_is_infinity(&res3));
+    CHECK(secp256k1_ge_is_valid_var(&res3) == 0);
+    /* Serializing the point at infinity must fail, compressed or not. */
+    CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0);
+    psize = 65;
+    CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0);
+    /* check zero/one edge cases */
+    secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero);
+    secp256k1_ge_set_gej(&res3, &res1);
+    CHECK(secp256k1_ge_is_infinity(&res3));
+    secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero);
+    secp256k1_ge_set_gej(&res3, &res1);
+    ge_equals_gej(&res3, point);
+    secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one);
+    secp256k1_ge_set_gej(&res3, &res1);
+    ge_equals_ge(&res3, &secp256k1_ge_const_g);
+}
+
+/* Repeatedly square x (starting at 2) and, whenever x is a valid x
+ * coordinate, run the point-times-order test on the lifted point; finally
+ * check x against the precomputed value of 2^(2^500). */
+void run_point_times_order(void) {
+    int i;
+    secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2);
+    /* Expected value of x after 500 squarings. */
+    static const secp256k1_fe xr = SECP256K1_FE_CONST(
+        0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C,
+        0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45
+    );
+    for (i = 0; i < 500; i++) {
+        secp256k1_ge p;
+        if (secp256k1_ge_set_xo_var(&p, &x, 1)) {
+            secp256k1_gej j;
+            CHECK(secp256k1_ge_is_valid_var(&p));
+            secp256k1_gej_set_ge(&j, &p);
+            CHECK(secp256k1_gej_is_valid_var(&j));
+            test_point_times_order(&j);
+        }
+        secp256k1_fe_sqr(&x, &x);
+    }
+    secp256k1_fe_normalize_var(&x);
+    CHECK(secp256k1_fe_equal_var(&x, &xr));
+}
+
+/* Check secp256k1_ecmult_const against a fixed known-answer test vector. */
+void ecmult_const_random_mult(void) {
+    /* random starting point A (on the curve) */
+    secp256k1_ge a = SECP256K1_GE_CONST(
+        0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b,
+        0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a,
+        0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c,
+        0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d
+    );
+    /* random initial factor xn */
+    secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+        0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327,
+        0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b
+    );
+    /* expected xn * A (from sage) */
+    secp256k1_ge expected_b = SECP256K1_GE_CONST(
+        0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd,
+        0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786,
+        0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f,
+        0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956
+    );
+    secp256k1_gej b;
+    secp256k1_ecmult_const(&b, &a, &xn, 256);
+
+    CHECK(secp256k1_ge_is_valid_var(&a));
+    ge_equals_gej(&expected_b, &b);
+}
+
+/* Check that a*(b*G) == b*(a*G) when computed via secp256k1_ecmult_const. */
+void ecmult_const_commutativity(void) {
+    secp256k1_scalar sa;
+    secp256k1_scalar sb;
+    secp256k1_gej aj;
+    secp256k1_gej bj;
+    secp256k1_ge pa;
+    secp256k1_ge pb;
+    random_scalar_order_test(&sa);
+    random_scalar_order_test(&sb);
+
+    /* pa = sa*G, pb = sb*G. */
+    secp256k1_ecmult_const(&aj, &secp256k1_ge_const_g, &sa, 256);
+    secp256k1_ecmult_const(&bj, &secp256k1_ge_const_g, &sb, 256);
+    secp256k1_ge_set_gej(&pa, &aj);
+    secp256k1_ge_set_gej(&pb, &bj);
+    /* Multiply each by the other scalar; the results must agree. */
+    secp256k1_ecmult_const(&aj, &pa, &sb, 256);
+    secp256k1_ecmult_const(&bj, &pb, &sa, 256);
+    secp256k1_ge_set_gej(&pa, &aj);
+    secp256k1_ge_set_gej(&pb, &bj);
+    ge_equals_ge(&pa, &pb);
+}
+
+/* Check secp256k1_ecmult_const for the special scalars 0, 1 and -1. */
+void ecmult_const_mult_zero_one(void) {
+    secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+    secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+    secp256k1_scalar negone;
+    secp256k1_gej res1;
+    secp256k1_ge res2;
+    secp256k1_ge point;
+    secp256k1_scalar_negate(&negone, &one);
+
+    random_group_element_test(&point);
+    /* 0 * P must be infinity. */
+    secp256k1_ecmult_const(&res1, &point, &zero, 3);
+    secp256k1_ge_set_gej(&res2, &res1);
+    CHECK(secp256k1_ge_is_infinity(&res2));
+    /* 1 * P must be P. */
+    secp256k1_ecmult_const(&res1, &point, &one, 2);
+    secp256k1_ge_set_gej(&res2, &res1);
+    ge_equals_ge(&res2, &point);
+    /* -1 * P must be -P. */
+    secp256k1_ecmult_const(&res1, &point, &negone, 256);
+    secp256k1_gej_neg(&res1, &res1);
+    secp256k1_ge_set_gej(&res2, &res1);
+    ge_equals_ge(&res2, &point);
+}
+
+/* Multiply G by a fixed scalar 100 times in a chain and compare against a
+ * precomputed expected point. */
+void ecmult_const_chain_multiply(void) {
+    /* Check known result (randomly generated test problem from sage) */
+    const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST(
+        0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d,
+        0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b
+    );
+    const secp256k1_gej expected_point = SECP256K1_GEJ_CONST(
+        0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd,
+        0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f,
+        0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196,
+        0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435
+    );
+    secp256k1_gej point;
+    secp256k1_ge res;
+    int i;
+
+    secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g);
+    for (i = 0; i < 100; ++i) {
+        secp256k1_ge tmp;
+        secp256k1_ge_set_gej(&tmp, &point);
+        secp256k1_ecmult_const(&point, &tmp, &scalar, 256);
+    }
+    secp256k1_ge_set_gej(&res, &point);
+    ge_equals_gej(&res, &expected_point);
+}
+
+/* Driver for all constant-time ecmult tests. */
+void run_ecmult_const_tests(void) {
+    ecmult_const_mult_zero_one();
+    ecmult_const_random_mult();
+    ecmult_const_commutativity();
+    ecmult_const_chain_multiply();
+}
+
+/* Parallel arrays of scalars and points, served to ecmult_multi through
+ * ecmult_multi_callback. */
+typedef struct {
+    secp256k1_scalar *sc;
+    secp256k1_ge *pt;
+} ecmult_multi_data;
+
+/* Callback feeding (scalar, point) pairs from an ecmult_multi_data table. */
+static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ecmult_multi_data *d = (ecmult_multi_data*) cbdata;
+    *sc = d->sc[idx];
+    *pt = d->pt[idx];
+    return 1;
+}
+
+static int ecmult_multi_false_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+ (void)sc;
+ (void)pt;
+ (void)idx;
+ (void)cbdata;
+ return 0;
+}
+
/* Exercise an ecmult_multi implementation against the plain ecmult and
 * against algebraic identities (cancellation, distributivity).
 * Each sub-test computes the same value two ways, negates one result and
 * adds; the sum must be the point at infinity. */
void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func ecmult_multi) {
    int ncount;
    secp256k1_scalar szero;          /* constant zero scalar, reused as the "no G term" argument */
    secp256k1_scalar sc[32];
    secp256k1_ge pt[32];
    secp256k1_gej r;
    secp256k1_gej r2;
    ecmult_multi_data data;
    secp256k1_scratch *scratch_empty;

    data.sc = sc;
    data.pt = pt;
    secp256k1_scalar_set_int(&szero, 0);

    /* No points to multiply */
    CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0));

    /* Check 1- and 2-point multiplies against ecmult */
    for (ncount = 0; ncount < count; ncount++) {
        secp256k1_ge ptg;
        secp256k1_gej ptgj;
        random_scalar_order(&sc[0]);
        random_scalar_order(&sc[1]);

        random_group_element_test(&ptg);
        secp256k1_gej_set_ge(&ptgj, &ptg);
        pt[0] = ptg;
        pt[1] = secp256k1_ge_const_g;

        /* only G scalar: r2 = sc[0]*G via ecmult, r = sc[0]*G via ecmult_multi with 0 points */
        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));

        /* 1-point: sc[0]*ptg computed both ways */
        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));

        /* Try to multiply 1 point, but scratch space is empty: must fail */
        scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0);
        CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1));
        secp256k1_scratch_destroy(scratch_empty);

        /* Try to multiply 1 point, but callback returns false: must fail */
        CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1));

        /* 2-point: sc[0]*ptg + sc[1]*G (pt[1] is G) */
        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));

        /* 2-point with G scalar: same value, with the G term passed via inp_g_sc */
        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));
    }

    /* Check infinite outputs of various forms */
    for (ncount = 0; ncount < count; ncount++) {
        secp256k1_ge ptg;
        size_t i, j;
        size_t sizes[] = { 2, 10, 32 };

        /* Random scalars on points at infinity sum to infinity. */
        for (j = 0; j < 3; j++) {
            for (i = 0; i < 32; i++) {
                random_scalar_order(&sc[i]);
                secp256k1_ge_set_infinity(&pt[i]);
            }
            CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
            CHECK(secp256k1_gej_is_infinity(&r));
        }

        /* Zero scalars on random points sum to infinity. */
        for (j = 0; j < 3; j++) {
            for (i = 0; i < 32; i++) {
                random_group_element_test(&ptg);
                pt[i] = ptg;
                secp256k1_scalar_set_int(&sc[i], 0);
            }
            CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
            CHECK(secp256k1_gej_is_infinity(&r));
        }

        /* Pairwise cancellation: (s, -s) on the same point, and (s, s) on
         * a point and its negation, both sum to infinity. */
        for (j = 0; j < 3; j++) {
            random_group_element_test(&ptg);
            for (i = 0; i < 16; i++) {
                random_scalar_order(&sc[2*i]);
                secp256k1_scalar_negate(&sc[2*i + 1], &sc[2*i]);
                pt[2 * i] = ptg;
                pt[2 * i + 1] = ptg;
            }

            CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
            CHECK(secp256k1_gej_is_infinity(&r));

            random_scalar_order(&sc[0]);
            for (i = 0; i < 16; i++) {
                random_group_element_test(&ptg);

                sc[2*i] = sc[0];
                sc[2*i+1] = sc[0];
                pt[2 * i] = ptg;
                secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]);
            }

            CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
            CHECK(secp256k1_gej_is_infinity(&r));
        }

        /* One point with 32 scalars arranged so they sum to zero:
         * sc[0] = sum of sc[1..31] before each of those is negated. */
        random_group_element_test(&ptg);
        secp256k1_scalar_set_int(&sc[0], 0);
        pt[0] = ptg;
        for (i = 1; i < 32; i++) {
            pt[i] = ptg;

            random_scalar_order(&sc[i]);
            secp256k1_scalar_add(&sc[0], &sc[0], &sc[i]);
            secp256k1_scalar_negate(&sc[i], &sc[i]);
        }

        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32));
        CHECK(secp256k1_gej_is_infinity(&r));
    }

    /* Check random points, constant scalar: s*(P1+...+P20) == s*P1+...+s*P20 */
    for (ncount = 0; ncount < count; ncount++) {
        size_t i;
        secp256k1_gej_set_infinity(&r);

        random_scalar_order(&sc[0]);
        for (i = 0; i < 20; i++) {
            secp256k1_ge ptg;
            sc[i] = sc[0];
            random_group_element_test(&ptg);
            pt[i] = ptg;
            secp256k1_gej_add_ge_var(&r, &r, &pt[i], NULL);
        }

        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));
    }

    /* Check random scalars, constant point: (s1+...+s20)*P == s1*P+...+s20*P */
    for (ncount = 0; ncount < count; ncount++) {
        size_t i;
        secp256k1_ge ptg;
        secp256k1_gej p0j;
        secp256k1_scalar rs;
        secp256k1_scalar_set_int(&rs, 0);

        random_group_element_test(&ptg);
        for (i = 0; i < 20; i++) {
            random_scalar_order(&sc[i]);
            pt[i] = ptg;
            secp256k1_scalar_add(&rs, &rs, &sc[i]);
        }

        secp256k1_gej_set_ge(&p0j, &pt[0]);
        secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero);
        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
        secp256k1_gej_neg(&r2, &r2);
        secp256k1_gej_add_var(&r, &r, &r2, NULL);
        CHECK(secp256k1_gej_is_infinity(&r));
    }

    /* Sanity check that zero scalars don't cause problems */
    secp256k1_scalar_clear(&sc[0]);
    CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
    secp256k1_scalar_clear(&sc[1]);
    secp256k1_scalar_clear(&sc[2]);
    secp256k1_scalar_clear(&sc[3]);
    secp256k1_scalar_clear(&sc[4]);
    CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6));
    CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5));
    CHECK(secp256k1_gej_is_infinity(&r));

    /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */
    {
        const size_t TOP = 8;
        size_t s0i, s1i;
        size_t t0i, t1i;
        secp256k1_ge ptg;
        secp256k1_gej ptgj;

        random_group_element_test(&ptg);
        secp256k1_gej_set_ge(&ptgj, &ptg);

        for(t0i = 0; t0i < TOP; t0i++) {
            for(t1i = 0; t1i < TOP; t1i++) {
                secp256k1_gej t0p, t1p;
                secp256k1_scalar t0, t1;

                /* Index i encodes the signed value (-1)^(i&1) * ((i+1)/2),
                 * covering 0, 1, -1, 2, -2, 3, -3, 4. */
                secp256k1_scalar_set_int(&t0, (t0i + 1) / 2);
                secp256k1_scalar_cond_negate(&t0, t0i & 1);
                secp256k1_scalar_set_int(&t1, (t1i + 1) / 2);
                secp256k1_scalar_cond_negate(&t1, t1i & 1);

                secp256k1_ecmult(&ctx->ecmult_ctx, &t0p, &ptgj, &t0, &szero);
                secp256k1_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero);

                for(s0i = 0; s0i < TOP; s0i++) {
                    for(s1i = 0; s1i < TOP; s1i++) {
                        secp256k1_scalar tmp1, tmp2;
                        secp256k1_gej expected, actual;

                        secp256k1_ge_set_gej(&pt[0], &t0p);
                        secp256k1_ge_set_gej(&pt[1], &t1p);

                        secp256k1_scalar_set_int(&sc[0], (s0i + 1) / 2);
                        secp256k1_scalar_cond_negate(&sc[0], s0i & 1);
                        secp256k1_scalar_set_int(&sc[1], (s1i + 1) / 2);
                        secp256k1_scalar_cond_negate(&sc[1], s1i & 1);

                        /* expected = (t0*s0 + t1*s1) * P, computed via scalar arithmetic */
                        secp256k1_scalar_mul(&tmp1, &t0, &sc[0]);
                        secp256k1_scalar_mul(&tmp2, &t1, &sc[1]);
                        secp256k1_scalar_add(&tmp1, &tmp1, &tmp2);

                        secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero);
                        CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2));
                        secp256k1_gej_neg(&expected, &expected);
                        secp256k1_gej_add_var(&actual, &actual, &expected, NULL);
                        CHECK(secp256k1_gej_is_infinity(&actual));
                    }
                }
            }
        }
    }
}
+
/* Check that bucket_window_inv is a right inverse of bucket_window:
 * feeding its result (the max point count for a window) back into
 * bucket_window yields the same window, and one more point bumps the
 * window up (except at the maximum window, where it can't grow). */
void test_secp256k1_pippenger_bucket_window_inv(void) {
    int i;

    CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0);
    for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) {
#ifdef USE_ENDOMORPHISM
        /* Bucket_window of 8 is not used with endo */
        if (i == 8) {
            continue;
        }
#endif
        CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i);
        if (i != PIPPENGER_MAX_BUCKET_WINDOW) {
            CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i);
        }
    }
}
+
+/**
+ * Probabilistically test the function returning the maximum number of possible points
+ * for a given scratch space.
+ */
+void test_ecmult_multi_pippenger_max_points(void) {
+ size_t scratch_size = secp256k1_rand_int(256);
+ size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12);
+ secp256k1_scratch *scratch;
+ size_t n_points_supported;
+ int bucket_window = 0;
+
+ for(; scratch_size < max_size; scratch_size+=256) {
+ scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size);
+ CHECK(scratch != NULL);
+ n_points_supported = secp256k1_pippenger_max_points(scratch);
+ if (n_points_supported == 0) {
+ secp256k1_scratch_destroy(scratch);
+ continue;
+ }
+ bucket_window = secp256k1_pippenger_bucket_window(n_points_supported);
+ CHECK(secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points_supported, bucket_window), PIPPENGER_SCRATCH_OBJECTS));
+ secp256k1_scratch_deallocate_frame(scratch);
+ secp256k1_scratch_destroy(scratch);
+ }
+ CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW);
+}
+
+/**
+ * Run secp256k1_ecmult_multi_var with num points and a scratch space restricted to
+ * 1 <= i <= num points.
+ */
+void test_ecmult_multi_batching(void) {
+ static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD;
+ secp256k1_scalar scG;
+ secp256k1_scalar szero;
+ secp256k1_scalar *sc = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_scalar) * n_points);
+ secp256k1_ge *pt = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * n_points);
+ secp256k1_gej r;
+ secp256k1_gej r2;
+ ecmult_multi_data data;
+ int i;
+ secp256k1_scratch *scratch;
+
+ secp256k1_gej_set_infinity(&r2);
+ secp256k1_scalar_set_int(&szero, 0);
+
+ /* Get random scalars and group elements and compute result */
+ random_scalar_order(&scG);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG);
+ for(i = 0; i < n_points; i++) {
+ secp256k1_ge ptg;
+ secp256k1_gej ptgj;
+ random_group_element_test(&ptg);
+ secp256k1_gej_set_ge(&ptgj, &ptg);
+ pt[i] = ptg;
+ random_scalar_order(&sc[i]);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL);
+ secp256k1_gej_add_var(&r2, &r2, &ptgj, NULL);
+ }
+ data.sc = sc;
+ data.pt = pt;
+
+ /* Test with empty scratch space */
+ scratch = secp256k1_scratch_create(&ctx->error_callback, 0);
+ CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1));
+ secp256k1_scratch_destroy(scratch);
+
+ /* Test with space for 1 point in pippenger. That's not enough because
+ * ecmult_multi selects strauss which requires more memory. */
+ scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT);
+ CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1));
+ secp256k1_scratch_destroy(scratch);
+
+ secp256k1_gej_neg(&r2, &r2);
+ for(i = 1; i <= n_points; i++) {
+ if (i > ECMULT_PIPPENGER_THRESHOLD) {
+ int bucket_window = secp256k1_pippenger_bucket_window(i);
+ size_t scratch_size = secp256k1_pippenger_scratch_size(i, bucket_window);
+ scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT);
+ } else {
+ size_t scratch_size = secp256k1_strauss_scratch_size(i);
+ scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
+ }
+ CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
+ secp256k1_gej_add_var(&r, &r, &r2, NULL);
+ CHECK(secp256k1_gej_is_infinity(&r));
+ secp256k1_scratch_destroy(scratch);
+ }
+ free(sc);
+ free(pt);
+}
+
/* Run all multi-point multiplication tests: helper-function checks, then
 * test_ecmult_multi against each backend (auto-dispatch, Pippenger,
 * Strauss) with a generous scratch, then with a minimal scratch, and
 * finally the batching sweep. */
void run_ecmult_multi_tests(void) {
    secp256k1_scratch *scratch;

    test_secp256k1_pippenger_bucket_window_inv();
    test_ecmult_multi_pippenger_max_points();
    scratch = secp256k1_scratch_create(&ctx->error_callback, 819200);
    test_ecmult_multi(scratch, secp256k1_ecmult_multi_var);
    test_ecmult_multi(scratch, secp256k1_ecmult_pippenger_batch_single);
    test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single);
    secp256k1_scratch_destroy(scratch);

    /* Run test_ecmult_multi with space for exactly one point */
    scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
    test_ecmult_multi(scratch, secp256k1_ecmult_multi_var);
    secp256k1_scratch_destroy(scratch);

    test_ecmult_multi_batching();
}
+
/* Verify the variable-time wNAF encoding of *number for window size w:
 * digits are odd and in range, non-zero digits are spaced at least w-1
 * apart, there is no leading zero padding, and re-evaluating the digit
 * string base 2 recovers the original scalar. */
void test_wnaf(const secp256k1_scalar *number, int w) {
    secp256k1_scalar x, two, t;
    int wnaf[256];
    int zeroes = -1; /* zeros seen since the last non-zero digit; -1 until one is seen */
    int i;
    int bits;
    secp256k1_scalar_set_int(&x, 0);
    secp256k1_scalar_set_int(&two, 2);
    bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w);
    CHECK(bits <= 256);
    /* Walk from the most significant digit down, rebuilding the scalar. */
    for (i = bits-1; i >= 0; i--) {
        int v = wnaf[i];
        secp256k1_scalar_mul(&x, &x, &two);
        if (v) {
            CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */
            zeroes=0;
            CHECK((v & 1) == 1); /* check non-zero elements are odd */
            CHECK(v <= (1 << (w-1)) - 1); /* check digit is below the upper range bound */
            CHECK(v >= -(1 << (w-1)) - 1); /* check digit is above the lower range bound */
        } else {
            CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */
            zeroes++;
        }
        if (v >= 0) {
            secp256k1_scalar_set_int(&t, v);
        } else {
            /* Negative digits: set |v| and negate, since set_int takes unsigned. */
            secp256k1_scalar_set_int(&t, -v);
            secp256k1_scalar_negate(&t, &t);
        }
        secp256k1_scalar_add(&x, &x, &t);
    }
    CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */
}
+
+void test_constant_wnaf_negate(const secp256k1_scalar *number) {
+ secp256k1_scalar neg1 = *number;
+ secp256k1_scalar neg2 = *number;
+ int sign1 = 1;
+ int sign2 = 1;
+
+ if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) {
+ secp256k1_scalar_negate(&neg1, &neg1);
+ sign1 = -1;
+ }
+ sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2));
+ CHECK(sign1 == sign2);
+ CHECK(secp256k1_scalar_eq(&neg1, &neg2));
+}
+
/* Verify the constant-time wNAF encoding: every digit is non-zero, odd and
 * within (-(2^w), 2^w), and re-evaluating the digits base 2^w (plus the
 * returned skew correction) recovers the input scalar. */
void test_constant_wnaf(const secp256k1_scalar *number, int w) {
    secp256k1_scalar x, shift;
    int wnaf[256] = {0};
    int i;
    int skew;
    int bits = 256;
    secp256k1_scalar num = *number;

    secp256k1_scalar_set_int(&x, 0);
    secp256k1_scalar_set_int(&shift, 1 << w);
    /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
#ifdef USE_ENDOMORPHISM
    for (i = 0; i < 16; ++i) {
        secp256k1_scalar_shr_int(&num, 8);
    }
    bits = 128;
#endif
    skew = secp256k1_wnaf_const(wnaf, num, w, bits);

    /* Rebuild the scalar from the most significant digit down. */
    for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
        secp256k1_scalar t;
        int v = wnaf[i];
        CHECK(v != 0); /* check nonzero */
        CHECK(v & 1);  /* check parity */
        CHECK(v > -(1 << w)); /* check range above */
        CHECK(v < (1 << w));  /* check range below */

        secp256k1_scalar_mul(&x, &x, &shift);
        if (v >= 0) {
            secp256k1_scalar_set_int(&t, v);
        } else {
            /* Negative digits: set |v| and negate, since set_int takes unsigned. */
            secp256k1_scalar_set_int(&t, -v);
            secp256k1_scalar_negate(&t, &t);
        }
        secp256k1_scalar_add(&x, &x, &t);
    }
    /* Skew num because when encoding numbers as odd we use an offset */
    secp256k1_scalar_cadd_bit(&num, skew == 2, 1);
    CHECK(secp256k1_scalar_eq(&x, &num));
}
+
/* Verify the fixed wNAF encoding: digits are zero or odd and within
 * (-(2^w), 2^w), and re-evaluating the digits base 2^w (plus the returned
 * skew correction) recovers the input scalar. */
void test_fixed_wnaf(const secp256k1_scalar *number, int w) {
    secp256k1_scalar x, shift;
    int wnaf[256] = {0};
    int i;
    int skew;
    secp256k1_scalar num = *number;

    secp256k1_scalar_set_int(&x, 0);
    secp256k1_scalar_set_int(&shift, 1 << w);
    /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
#ifdef USE_ENDOMORPHISM
    for (i = 0; i < 16; ++i) {
        secp256k1_scalar_shr_int(&num, 8);
    }
#endif
    skew = secp256k1_wnaf_fixed(wnaf, &num, w);

    /* Rebuild the scalar from the most significant digit down. */
    for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
        secp256k1_scalar t;
        int v = wnaf[i];
        CHECK(v == 0 || v & 1);  /* check parity */
        CHECK(v > -(1 << w)); /* check range above */
        CHECK(v < (1 << w));  /* check range below */

        secp256k1_scalar_mul(&x, &x, &shift);
        if (v >= 0) {
            secp256k1_scalar_set_int(&t, v);
        } else {
            /* Negative digits: set |v| and negate, since set_int takes unsigned. */
            secp256k1_scalar_set_int(&t, -v);
            secp256k1_scalar_negate(&t, &t);
        }
        secp256k1_scalar_add(&x, &x, &t);
    }
    /* If skew is 1 then add 1 to num */
    secp256k1_scalar_cadd_bit(&num, 0, skew == 1);
    CHECK(secp256k1_scalar_eq(&x, &num));
}
+
/* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the
 * rest is 0. */
void test_fixed_wnaf_small_helper(int *wnaf, int *wnaf_expected, int w) {
    int idx;
    for (idx = 0; idx < 8; ++idx) {
        CHECK(wnaf[idx] == wnaf_expected[idx]);
    }
    for (idx = 8; idx <= (int)WNAF_SIZE(w) - 1; ++idx) {
        CHECK(wnaf[idx] == 0);
    }
}
+
/* Check the fixed wNAF encoding (w = 4) on small hand-picked inputs with
 * known digit strings and skews: 0, 1, and several 32-bit patterns. */
void test_fixed_wnaf_small(void) {
    int w = 4;
    int wnaf[256] = {0};
    int i;
    int skew;
    secp256k1_scalar num;

    /* Zero encodes as all-zero digits with no skew. */
    secp256k1_scalar_set_int(&num, 0);
    skew = secp256k1_wnaf_fixed(wnaf, &num, w);
    for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
        int v = wnaf[i];
        CHECK(v == 0);
    }
    CHECK(skew == 0);

    /* One encodes as a single least-significant digit of 1. */
    secp256k1_scalar_set_int(&num, 1);
    skew = secp256k1_wnaf_fixed(wnaf, &num, w);
    for (i = WNAF_SIZE(w)-1; i >= 1; --i) {
        int v = wnaf[i];
        CHECK(v == 0);
    }
    CHECK(wnaf[0] == 1);
    CHECK(skew == 0);

    {
        /* All-0xf digits: 0xffffffff in base 16 with odd digits, no skew. */
        int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf };
        secp256k1_scalar_set_int(&num, 0xffffffff);
        skew = secp256k1_wnaf_fixed(wnaf, &num, w);
        test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w);
        CHECK(skew == 0);
    }
    {
        /* Even input: requires a skew of 1 to make the digits odd. */
        int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf };
        secp256k1_scalar_set_int(&num, 0xeeeeeeee);
        skew = secp256k1_wnaf_fixed(wnaf, &num, w);
        test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w);
        CHECK(skew == 1);
    }
    {
        /* Alternating 01 bytes: digits alternate 1, 0. */
        int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 };
        secp256k1_scalar_set_int(&num, 0x01010101);
        skew = secp256k1_wnaf_fixed(wnaf, &num, w);
        test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w);
        CHECK(skew == 0);
    }
    {
        /* Pattern exercising negative digits with borrows into higher digits. */
        int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 };
        secp256k1_scalar_set_int(&num, 0x01ef1ef1);
        skew = secp256k1_wnaf_fixed(wnaf, &num, w);
        test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w);
        CHECK(skew == 0);
    }
}
+
/* Run all wNAF encoding tests: small fixed inputs, then random scalars with
 * varying window sizes, then zero-scalar behavior of cond_negate. */
void run_wnaf(void) {
    int i;
    secp256k1_scalar n = {{0}};

    /* Sanity check: 1 and 2 are the smallest odd and even numbers and should
     * have easier-to-diagnose failure modes */
    n.d[0] = 1;
    test_constant_wnaf(&n, 4);
    n.d[0] = 2;
    test_constant_wnaf(&n, 4);
    /* Test 0 */
    test_fixed_wnaf_small();
    /* Random tests */
    for (i = 0; i < count; i++) {
        random_scalar_order(&n);
        test_wnaf(&n, 4+(i%10));
        test_constant_wnaf_negate(&n);
        test_constant_wnaf(&n, 4 + (i % 10));
        test_fixed_wnaf(&n, 4 + (i % 10));
    }
    /* cond_negate on zero must leave zero unchanged, whatever the flag. */
    secp256k1_scalar_set_int(&n, 0);
    CHECK(secp256k1_scalar_cond_negate(&n, 1) == -1);
    CHECK(secp256k1_scalar_is_zero(&n));
    CHECK(secp256k1_scalar_cond_negate(&n, 0) == 1);
    CHECK(secp256k1_scalar_is_zero(&n));
}
+
void test_ecmult_constants(void) {
    /* Test ecmult_gen() for [0..36) and [order-36..0). */
    secp256k1_scalar x;
    secp256k1_gej r;
    secp256k1_ge ng; /* negated generator, used to step i*G back to infinity */
    int i;
    int j;
    secp256k1_ge_neg(&ng, &secp256k1_ge_const_g);
    /* For each small i: i*G minus G added i times must be infinity, and
     * after i-1 subtractions the remainder must equal G itself. */
    for (i = 0; i < 36; i++ ) {
        secp256k1_scalar_set_int(&x, i);
        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
        for (j = 0; j < i; j++) {
            if (j == i - 1) {
                ge_equals_gej(&secp256k1_ge_const_g, &r);
            }
            secp256k1_gej_add_ge(&r, &r, &ng);
        }
        CHECK(secp256k1_gej_is_infinity(&r));
    }
    /* Same for negative multiples: (-i)*G plus G added i times is infinity. */
    for (i = 1; i <= 36; i++ ) {
        secp256k1_scalar_set_int(&x, i);
        secp256k1_scalar_negate(&x, &x);
        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
        for (j = 0; j < i; j++) {
            if (j == i - 1) {
                ge_equals_gej(&ng, &r);
            }
            secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g);
        }
        CHECK(secp256k1_gej_is_infinity(&r));
    }
}
+
/* Run the small-multiple ecmult_gen consistency tests. */
void run_ecmult_constants(void) {
    test_ecmult_constants();
}
+
void test_ecmult_gen_blind(void) {
    /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */
    secp256k1_scalar key;
    secp256k1_scalar b;   /* blinding scalar before re-blinding */
    unsigned char seed32[32];
    secp256k1_gej pgej;   /* key*G before re-blinding */
    secp256k1_gej pgej2;  /* key*G after re-blinding */
    secp256k1_gej i;      /* initial blinded point before re-blinding */
    secp256k1_ge pge;
    random_scalar_order_test(&key);
    secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key);
    secp256k1_rand256(seed32);
    b = ctx->ecmult_gen_ctx.blind;
    i = ctx->ecmult_gen_ctx.initial;
    secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
    /* Re-blinding with a fresh seed must change the internal blinding state. */
    CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
    secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key);
    /* The Jacobian coordinates must differ (different blinding)... */
    CHECK(!gej_xyz_equals_gej(&pgej, &pgej2));
    CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial));
    /* ...but the represented affine point must be unchanged. */
    secp256k1_ge_set_gej(&pge, &pgej);
    ge_equals_gej(&pge, &pgej2);
}
+
+void test_ecmult_gen_blind_reset(void) {
+ /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */
+ secp256k1_scalar b;
+ secp256k1_gej initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0);
+ b = ctx->ecmult_gen_ctx.blind;
+ initial = ctx->ecmult_gen_ctx.initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0);
+ CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
+ CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial));
+}
+
/* Run the blinding reset test once, then the re-blinding test repeatedly. */
void run_ecmult_gen_blind(void) {
    int i;
    test_ecmult_gen_blind_reset();
    for (i = 0; i < 10; i++) {
        test_ecmult_gen_blind();
    }
}
+
+#ifdef USE_ENDOMORPHISM
/***** ENDOMORPHISM TESTS *****/
/* Check the lambda decomposition of a random scalar: both components
 * (after folding into the low half-range by negating high values) must
 * fit in 128 bits, i.e. their top 16 serialized bytes are zero. */
void test_scalar_split(void) {
    secp256k1_scalar full;
    secp256k1_scalar s1, slam;
    const unsigned char zero[32] = {0};
    unsigned char tmp[32];

    random_scalar_order_test(&full);
    secp256k1_scalar_split_lambda(&s1, &slam, &full);

    /* check that both are <= 128 bits in size */
    if (secp256k1_scalar_is_high(&s1)) {
        secp256k1_scalar_negate(&s1, &s1);
    }
    if (secp256k1_scalar_is_high(&slam)) {
        secp256k1_scalar_negate(&slam, &slam);
    }

    secp256k1_scalar_get_b32(tmp, &s1);
    CHECK(memcmp(zero, tmp, 16) == 0);
    secp256k1_scalar_get_b32(tmp, &slam);
    CHECK(memcmp(zero, tmp, 16) == 0);
}
+
/* Run the endomorphism (lambda split) tests. */
void run_endomorphism_tests(void) {
    test_scalar_split();
}
+#endif
+
/* Exhaustively test public-key parsing of a fixed 64-byte x||y input under
 * every prefix byte (0..255) and every length 3..65.
 * xvalid: x is a valid coordinate of some curve point.
 * yvalid: (x, y) as given is exactly on the curve.
 * For each (prefix, length) combination we predict whether parsing must
 * succeed, and whether a parse + uncompressed re-serialization must
 * reproduce the original y; all other combinations must fail to parse. */
void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) {
    unsigned char pubkeyc[65];
    secp256k1_pubkey pubkey;
    secp256k1_ge ge;
    size_t pubkeyclen;
    int32_t ecount;
    ecount = 0;
    secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
    for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) {
        /* Smaller sizes are tested exhaustively elsewhere. */
        int32_t i;
        memcpy(&pubkeyc[1], input, 64);
        VG_UNDEF(&pubkeyc[pubkeyclen], 65 - pubkeyclen);
        for (i = 0; i < 256; i++) {
            /* Try all type bytes. */
            int xpass;
            int ypass;
            int ysign;
            pubkeyc[0] = i;
            /* What sign does this point have? (2 = even y, 3 = odd y, matching compressed prefixes) */
            ysign = (input[63] & 1) + 2;
            /* For the current type (i) do we expect parsing to work? Handled all of compressed/uncompressed/hybrid. */
            xpass = xvalid && (pubkeyclen == 33) && ((i & 254) == 2);
            /* Do we expect a parse and re-serialize as uncompressed to give a matching y? */
            ypass = xvalid && yvalid && ((i & 4) == ((pubkeyclen == 65) << 2)) &&
                ((i == 4) || ((i & 251) == ysign)) && ((pubkeyclen == 33) || (pubkeyclen == 65));
            if (xpass || ypass) {
                /* These cases must parse. */
                unsigned char pubkeyo[65];
                size_t outl;
                memset(&pubkey, 0, sizeof(pubkey));
                VG_UNDEF(&pubkey, sizeof(pubkey));
                ecount = 0;
                CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
                VG_CHECK(&pubkey, sizeof(pubkey));
                /* Re-serialize compressed: x must round-trip exactly. */
                outl = 65;
                VG_UNDEF(pubkeyo, 65);
                CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
                VG_CHECK(pubkeyo, outl);
                CHECK(outl == 33);
                CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0);
                CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0]));
                if (ypass) {
                    /* This test isn't always done because we decode with alternative signs, so the y won't match. */
                    CHECK(pubkeyo[0] == ysign);
                    CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1);
                    memset(&pubkey, 0, sizeof(pubkey));
                    VG_UNDEF(&pubkey, sizeof(pubkey));
                    secp256k1_pubkey_save(&pubkey, &ge);
                    VG_CHECK(&pubkey, sizeof(pubkey));
                    /* Re-serialize uncompressed: both coordinates must round-trip. */
                    outl = 65;
                    VG_UNDEF(pubkeyo, 65);
                    CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1);
                    VG_CHECK(pubkeyo, outl);
                    CHECK(outl == 65);
                    CHECK(pubkeyo[0] == 4);
                    CHECK(memcmp(&pubkeyo[1], input, 64) == 0);
                }
                CHECK(ecount == 0);
            } else {
                /* These cases must fail to parse. */
                memset(&pubkey, 0xfe, sizeof(pubkey));
                ecount = 0;
                VG_UNDEF(&pubkey, sizeof(pubkey));
                CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0);
                VG_CHECK(&pubkey, sizeof(pubkey));
                CHECK(ecount == 0);
                /* Loading the zeroed pubkey must trigger the illegal-arg callback. */
                CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
                CHECK(ecount == 1);
            }
        }
    }
    secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
}
+
+void run_ec_pubkey_parse_test(void) {
+#define SECP256K1_EC_PARSE_TEST_NVALID (12)
+ const unsigned char valid[SECP256K1_EC_PARSE_TEST_NVALID][64] = {
+ {
+ /* Point with leading and trailing zeros in x and y serialization. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x64, 0xef, 0xa1, 0x7b, 0x77, 0x61, 0xe1, 0xe4, 0x27, 0x06, 0x98, 0x9f, 0xb4, 0x83,
+ 0xb8, 0xd2, 0xd4, 0x9b, 0xf7, 0x8f, 0xae, 0x98, 0x03, 0xf0, 0x99, 0xb8, 0x34, 0xed, 0xeb, 0x00
+ },
+ {
+ /* Point with x equal to a 3rd root of unity.*/
+ 0x7a, 0xe9, 0x6a, 0x2b, 0x65, 0x7c, 0x07, 0x10, 0x6e, 0x64, 0x47, 0x9e, 0xac, 0x34, 0x34, 0xe9,
+ 0x9c, 0xf0, 0x49, 0x75, 0x12, 0xf5, 0x89, 0x95, 0xc1, 0x39, 0x6c, 0x28, 0x71, 0x95, 0x01, 0xee,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Point with largest x. (1/2) */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c,
+ 0x0e, 0x99, 0x4b, 0x14, 0xea, 0x72, 0xf8, 0xc3, 0xeb, 0x95, 0xc7, 0x1e, 0xf6, 0x92, 0x57, 0x5e,
+ 0x77, 0x50, 0x58, 0x33, 0x2d, 0x7e, 0x52, 0xd0, 0x99, 0x5c, 0xf8, 0x03, 0x88, 0x71, 0xb6, 0x7d,
+ },
+ {
+ /* Point with largest x. (2/2) */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c,
+ 0xf1, 0x66, 0xb4, 0xeb, 0x15, 0x8d, 0x07, 0x3c, 0x14, 0x6a, 0x38, 0xe1, 0x09, 0x6d, 0xa8, 0xa1,
+ 0x88, 0xaf, 0xa7, 0xcc, 0xd2, 0x81, 0xad, 0x2f, 0x66, 0xa3, 0x07, 0xfb, 0x77, 0x8e, 0x45, 0xb2,
+ },
+ {
+ /* Point with smallest x. (1/2) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Point with smallest x. (2/2) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb,
+ 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41,
+ },
+ {
+ /* Point with largest y. (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with largest y. (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with largest y. (3/3) */
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with smallest y. (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Point with smallest y. (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Point with smallest y. (3/3) */
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+ }
+ };
+#define SECP256K1_EC_PARSE_TEST_NXVALID (4)
+ const unsigned char onlyxvalid[SECP256K1_EC_PARSE_TEST_NXVALID][64] = {
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (3/3)*/
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* x on curve, y is from y^2 = x^3 + 8. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+ }
+ };
+#define SECP256K1_EC_PARSE_TEST_NINVALID (7)
+ const unsigned char invalid[SECP256K1_EC_PARSE_TEST_NINVALID][64] = {
+ {
+ /* x is third root of -8, y is -1 * (x^3+7); also on the curve for y^2 = x^3 + 9. */
+ 0x0a, 0x2d, 0x2b, 0xa9, 0x35, 0x07, 0xf1, 0xdf, 0x23, 0x37, 0x70, 0xc2, 0xa7, 0x97, 0x96, 0x2c,
+ 0xc6, 0x1f, 0x6d, 0x15, 0xda, 0x14, 0xec, 0xd4, 0x7d, 0x8d, 0x27, 0xae, 0x1c, 0xd5, 0xf8, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Valid if x overflow ignored (x = 1 mod p). */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Valid if x overflow ignored (x = 1 mod p). */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb,
+ 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41,
+ },
+ {
+ /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ 0xf4, 0x84, 0x14, 0x5c, 0xb0, 0x14, 0x9b, 0x82, 0x5d, 0xff, 0x41, 0x2f, 0xa0, 0x52, 0xa8, 0x3f,
+ 0xcb, 0x72, 0xdb, 0x61, 0xd5, 0x6f, 0x37, 0x70, 0xce, 0x06, 0x6b, 0x73, 0x49, 0xa2, 0xaa, 0x28,
+ },
+ {
+ /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ 0x0b, 0x7b, 0xeb, 0xa3, 0x4f, 0xeb, 0x64, 0x7d, 0xa2, 0x00, 0xbe, 0xd0, 0x5f, 0xad, 0x57, 0xc0,
+ 0x34, 0x8d, 0x24, 0x9e, 0x2a, 0x90, 0xc8, 0x8f, 0x31, 0xf9, 0x94, 0x8b, 0xb6, 0x5d, 0x52, 0x07,
+ },
+ {
+ /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8f, 0x53, 0x7e, 0xef, 0xdf, 0xc1, 0x60, 0x6a, 0x07, 0x27, 0xcd, 0x69, 0xb4, 0xa7, 0x33, 0x3d,
+ 0x38, 0xed, 0x44, 0xe3, 0x93, 0x2a, 0x71, 0x79, 0xee, 0xcb, 0x4b, 0x6f, 0xba, 0x93, 0x60, 0xdc,
+ },
+ {
+ /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0xac, 0x81, 0x10, 0x20, 0x3e, 0x9f, 0x95, 0xf8, 0xd8, 0x32, 0x96, 0x4b, 0x58, 0xcc, 0xc2,
+ 0xc7, 0x12, 0xbb, 0x1c, 0x6c, 0xd5, 0x8e, 0x86, 0x11, 0x34, 0xb4, 0x8f, 0x45, 0x6c, 0x9b, 0x53
+ }
+ };
+ const unsigned char pubkeyc[66] = {
+ /* Serialization of G. */
+ 0x04, 0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62, 0x95, 0xCE, 0x87, 0x0B,
+ 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE, 0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17,
+ 0x98, 0x48, 0x3A, 0xDA, 0x77, 0x26, 0xA3, 0xC4, 0x65, 0x5D, 0xA4, 0xFB, 0xFC, 0x0E, 0x11, 0x08,
+ 0xA8, 0xFD, 0x17, 0xB4, 0x48, 0xA6, 0x85, 0x54, 0x19, 0x9C, 0x47, 0xD0, 0x8F, 0xFB, 0x10, 0xD4,
+ 0xB8, 0x00
+ };
+ unsigned char sout[65];
+ unsigned char shortkey[2];
+ secp256k1_ge ge;
+ secp256k1_pubkey pubkey;
+ size_t len;
+ int32_t i;
+ int32_t ecount;
+ int32_t ecount2;
+ ecount = 0;
+ /* Nothing should be reading this far into pubkeyc. */
+ VG_UNDEF(&pubkeyc[65], 1);
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ /* Zero length claimed, fail, zeroize, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(shortkey, 2);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* Length one claimed, fail, zeroize, no illegal arg error. */
+ for (i = 0; i < 256 ; i++) {
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ shortkey[0] = i;
+ VG_UNDEF(&shortkey[1], 1);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ }
+ /* Length two claimed, fail, zeroize, no illegal arg error. */
+ for (i = 0; i < 65536 ; i++) {
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ shortkey[0] = i & 255;
+ shortkey[1] = i >> 8;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ }
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0);
+ CHECK(ecount == 2);
+ /* NULL input string. Illegal arg and zeroize output. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 1);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 2);
+ /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* Valid parse. */
+ memset(&pubkey, 0, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ VG_UNDEF(&ge, sizeof(ge));
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1);
+ VG_CHECK(&ge.x, sizeof(ge.x));
+ VG_CHECK(&ge.y, sizeof(ge.y));
+ VG_CHECK(&ge.infinity, sizeof(ge.infinity));
+ ge_equals_ge(&secp256k1_ge_const_g, &ge);
+ CHECK(ecount == 0);
+ /* secp256k1_ec_pubkey_serialize illegal args. */
+ ecount = 0;
+ len = 65;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0);
+ CHECK(ecount == 1);
+ CHECK(len == 0);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0);
+ CHECK(ecount == 2);
+ len = 65;
+ VG_UNDEF(sout, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0);
+ VG_CHECK(sout, 65);
+ CHECK(ecount == 3);
+ CHECK(len == 0);
+ len = 65;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0);
+ CHECK(ecount == 4);
+ CHECK(len == 0);
+ len = 65;
+ VG_UNDEF(sout, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1);
+ VG_CHECK(sout, 65);
+ CHECK(ecount == 4);
+ CHECK(len == 65);
+ /* Multiple illegal args. Should still set arg error only once. */
+ ecount = 0;
+ ecount2 = 11;
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0);
+ CHECK(ecount == 1);
+ /* Does the illegal arg callback actually change the behavior? */
+ secp256k1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2);
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0);
+ CHECK(ecount == 1);
+ CHECK(ecount2 == 10);
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+ /* Try a bunch of prefabbed points with all possible encodings. */
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) {
+ ec_pubkey_parse_pointtest(valid[i], 1, 1);
+ }
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NXVALID; i++) {
+ ec_pubkey_parse_pointtest(onlyxvalid[i], 1, 0);
+ }
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NINVALID; i++) {
+ ec_pubkey_parse_pointtest(invalid[i], 0, 0);
+ }
+}
+
/* Exercise secret-key and public-key edge cases around the group order:
 * rejection of n, n+1, 0 and 2^256-1 as secret keys; acceptance of 1 and
 * n-1 (= -1); tweak add/mul corner cases (zero tweak, overflowing tweak,
 * tweaks that wrap the result to zero or one); illegal-argument handling;
 * and secp256k1_ec_pubkey_combine behavior including the point at infinity. */
void run_eckey_edge_case_test(void) {
    /* Big-endian encoding of the secp256k1 group order n (note the last byte
     * is 0x41, so ...0x40 below encodes n-1 and ...0x42 encodes n+1). */
    const unsigned char orderc[32] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
        0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
        0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41
    };
    const unsigned char zeros[sizeof(secp256k1_pubkey)] = {0x00};
    unsigned char ctmp[33];
    unsigned char ctmp2[33];
    secp256k1_pubkey pubkey;
    secp256k1_pubkey pubkey2;
    secp256k1_pubkey pubkey_one;
    secp256k1_pubkey pubkey_negone;
    const secp256k1_pubkey *pubkeys[3];
    size_t len;
    int32_t ecount;
    /* Group order is too large, reject. */
    CHECK(secp256k1_ec_seckey_verify(ctx, orderc) == 0);
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    /* Maximum value is too large, reject. */
    memset(ctmp, 255, 32);
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
    memset(&pubkey, 1, sizeof(pubkey));
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    /* Zero is too small, reject. */
    memset(ctmp, 0, 32);
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
    memset(&pubkey, 1, sizeof(pubkey));
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    /* One must be accepted. */
    ctmp[31] = 0x01;
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
    memset(&pubkey, 0, sizeof(pubkey));
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
    pubkey_one = pubkey;
    /* Group order + 1 is too large, reject. */
    memcpy(ctmp, orderc, 32);
    ctmp[31] = 0x42;
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
    memset(&pubkey, 1, sizeof(pubkey));
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    /* -1 must be accepted (ctmp now holds n-1). */
    ctmp[31] = 0x40;
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
    memset(&pubkey, 0, sizeof(pubkey));
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
    pubkey_negone = pubkey;
    /* Tweak of zero leaves the value unchanged. */
    memset(ctmp2, 0, 32);
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, ctmp2) == 1);
    CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
    memcpy(&pubkey2, &pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
    CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
    /* Multiply tweak of zero zeroizes the output. */
    CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, ctmp2) == 0);
    CHECK(memcmp(zeros, ctmp, 32) == 0);
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0);
    CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
    memcpy(&pubkey, &pubkey2, sizeof(pubkey));
    /* Overflowing key tweak zeroizes. */
    memcpy(ctmp, orderc, 32);
    ctmp[31] = 0x40;
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, orderc) == 0);
    CHECK(memcmp(zeros, ctmp, 32) == 0);
    memcpy(ctmp, orderc, 32);
    ctmp[31] = 0x40;
    CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, orderc) == 0);
    CHECK(memcmp(zeros, ctmp, 32) == 0);
    memcpy(ctmp, orderc, 32);
    ctmp[31] = 0x40;
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0);
    CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
    memcpy(&pubkey, &pubkey2, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0);
    CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
    memcpy(&pubkey, &pubkey2, sizeof(pubkey));
    /* Private key tweak (1 + (n-1) = n) results in a key of zero. */
    ctmp2[31] = 1;
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 0);
    CHECK(memcmp(zeros, ctmp2, 32) == 0);
    ctmp2[31] = 1;
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
    CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
    memcpy(&pubkey, &pubkey2, sizeof(pubkey));
    /* Tweak computation wraps and results in a key of 1. */
    ctmp2[31] = 2;
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 1);
    CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
    ctmp2[31] = 2;
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
    ctmp2[31] = 1;
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1);
    CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
    /* Tweak mul * 2 = 1+1. */
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
    ctmp2[31] = 2;
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1);
    CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
    /* Test argument errors. */
    ecount = 0;
    secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
    CHECK(ecount == 0);
    /* Zeroize pubkey on parse error (only the first 32 bytes are corrupted,
     * which makes the stored key invalid). */
    memset(&pubkey, 0, 32);
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
    CHECK(ecount == 1);
    CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
    memcpy(&pubkey, &pubkey2, sizeof(pubkey));
    memset(&pubkey2, 0, 32);
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0);
    CHECK(ecount == 2);
    CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0);
    /* Plain argument errors. */
    ecount = 0;
    CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
    CHECK(ecount == 0);
    CHECK(secp256k1_ec_seckey_verify(ctx, NULL) == 0);
    CHECK(ecount == 1);
    ecount = 0;
    memset(ctmp2, 0, 32);
    ctmp2[31] = 4;
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0);
    CHECK(ecount == 1);
    CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0);
    CHECK(ecount == 2);
    ecount = 0;
    memset(ctmp2, 0, 32);
    ctmp2[31] = 4;
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0);
    CHECK(ecount == 1);
    CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0);
    CHECK(ecount == 2);
    ecount = 0;
    memset(ctmp2, 0, 32);
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, NULL, ctmp2) == 0);
    CHECK(ecount == 1);
    CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, NULL) == 0);
    CHECK(ecount == 2);
    ecount = 0;
    memset(ctmp2, 0, 32);
    ctmp2[31] = 1;
    CHECK(secp256k1_ec_privkey_tweak_mul(ctx, NULL, ctmp2) == 0);
    CHECK(ecount == 1);
    CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, NULL) == 0);
    CHECK(ecount == 2);
    ecount = 0;
    CHECK(secp256k1_ec_pubkey_create(ctx, NULL, ctmp) == 0);
    CHECK(ecount == 1);
    memset(&pubkey, 1, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
    CHECK(ecount == 2);
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    /* secp256k1_ec_pubkey_combine tests. */
    ecount = 0;
    pubkeys[0] = &pubkey_one;
    VG_UNDEF(&pubkeys[0], sizeof(secp256k1_pubkey *));
    VG_UNDEF(&pubkeys[1], sizeof(secp256k1_pubkey *));
    VG_UNDEF(&pubkeys[2], sizeof(secp256k1_pubkey *));
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    CHECK(ecount == 1);
    CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0);
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    CHECK(ecount == 2);
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    CHECK(ecount == 3);
    pubkeys[0] = &pubkey_negone;
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
    CHECK(ecount == 3);
    len = 33;
    CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
    CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1);
    CHECK(memcmp(ctmp, ctmp2, 33) == 0);
    /* Result is infinity. */
    pubkeys[0] = &pubkey_one;
    pubkeys[1] = &pubkey_negone;
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
    CHECK(ecount == 3);
    /* Passes through infinity but comes out one. */
    pubkeys[2] = &pubkey_one;
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
    CHECK(ecount == 3);
    len = 33;
    CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
    CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1);
    CHECK(memcmp(ctmp, ctmp2, 33) == 0);
    /* Adds to two. */
    pubkeys[1] = &pubkey_one;
    memset(&pubkey, 255, sizeof(secp256k1_pubkey));
    VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1);
    VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
    CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
    CHECK(ecount == 3);
    secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
}
+
+void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
+ secp256k1_scalar nonce;
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid));
+}
+
+void test_ecdsa_sign_verify(void) {
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar one;
+ secp256k1_scalar msg, key;
+ secp256k1_scalar sigr, sigs;
+ int recid;
+ int getrec;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+ getrec = secp256k1_rand_bits(1);
+ random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL);
+ if (getrec) {
+ CHECK(recid >= 0 && recid < 4);
+ }
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_scalar_add(&msg, &msg, &one);
+ CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+}
+
+void run_ecdsa_sign_verify(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_sign_verify();
+ }
+}
+
/** Dummy nonce generation function that always emits the 32-byte nonce
 *  supplied via the data pointer, and fails (returns 0) on any retry so the
 *  precomputed nonce is either accepted on the first attempt or not at all.
 *  Use only for testing. */
static int precomputed_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
    (void)msg32;
    (void)key32;
    (void)algo16;
    /* Copy the fixed nonce regardless of the attempt number... */
    memcpy(nonce32, data, 32);
    /* ...but only the very first attempt is reported as successful. */
    if (counter != 0) {
        return 0;
    }
    return 1;
}
+
/* Dummy nonce generator that simulates a hard failure on the first attempt
 * (counter 0); subsequent attempts defer to RFC6979 with a shifted counter. */
static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
    if (counter != 0) {
        return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 1);
    }
    return 0;
}
+
/* Dummy nonce generator that forces the signer to retry: the first several
 * counter values produce nonces the signer must reject (zero, overflowing
 * values, the group order, and order+1); counter 5 defers to the real
 * RFC6979 generator, and anything beyond that is treated as a fatal error
 * (RFC6979's retry rate is negligible, especially in deterministic tests —
 * if someone does find a case where it retries for secp256k1, we'd like to
 * know). */
static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
    /* Big-endian encoding of the secp256k1 group order n. */
    static const unsigned char order[] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
        0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
        0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
    };
    switch (counter) {
        case 0:
            /* Zero: not a valid scalar. */
            memset(nonce32, 0, 32);
            return 1;
        case 1:
            /* 2^256 - 1: overflows the order. */
            memset(nonce32, 255, 32);
            return 1;
        case 2:
            /* 2^256 - 2: still overflows. */
            memset(nonce32, 255, 32);
            nonce32[31] = 254;
            return 1;
        case 3:
            /* Exactly the group order: rejected. */
            memcpy(nonce32, order, 32);
            return 1;
        case 4:
            /* Group order + 1: rejected. */
            memcpy(nonce32, order, 32);
            nonce32[31]++;
            return 1;
        case 5:
            /* Finally hand over to the real deterministic generator. */
            return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5);
        default:
            return 0;
    }
}
+
+int is_empty_signature(const secp256k1_ecdsa_signature *sig) {
+ static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0};
+ return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
+}
+
/* End-to-end ECDSA test over random keys and messages: public key
 * serialize/parse round-trip, pubkey negation, DER private key
 * export/import, optional additive and multiplicative key tweaks,
 * signing with varying extra entropy, verification, low-S
 * normalization/malleation, and DER signature round-trip including
 * a corrupted-serialization check. */
void test_ecdsa_end_to_end(void) {
    unsigned char extra[32] = {0x00};
    unsigned char privkey[32];
    unsigned char message[32];
    unsigned char privkey2[32];
    secp256k1_ecdsa_signature signature[6];
    secp256k1_scalar r, s;
    unsigned char sig[74];
    size_t siglen = 74;
    unsigned char pubkeyc[65];
    size_t pubkeyclen = 65;
    secp256k1_pubkey pubkey;
    secp256k1_pubkey pubkey_tmp;
    unsigned char seckey[300];
    size_t seckeylen = 300;

    /* Generate a random key and message. */
    {
        secp256k1_scalar msg, key;
        random_scalar_order_test(&msg);
        random_scalar_order_test(&key);
        secp256k1_scalar_get_b32(privkey, &key);
        secp256k1_scalar_get_b32(message, &msg);
    }

    /* Construct and verify corresponding public key. */
    CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
    CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);

    /* Verify exporting and importing public key (randomly compressed or not). */
    CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
    memset(&pubkey, 0, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);

    /* Verify negation changes the key and changes it back */
    memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
    CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0);
    CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
    CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);

    /* Verify private key import and export. */
    CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1));
    CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1);
    CHECK(memcmp(privkey, privkey2, 32) == 0);

    /* Optionally tweak the keys using addition. */
    if (secp256k1_rand_int(3) == 0) {
        int ret1;
        int ret2;
        unsigned char rnd[32];
        secp256k1_pubkey pubkey2;
        secp256k1_rand256_test(rnd);
        ret1 = secp256k1_ec_privkey_tweak_add(ctx, privkey, rnd);
        ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd);
        /* Private and public tweak must agree on success/failure. */
        CHECK(ret1 == ret2);
        if (ret1 == 0) {
            return;
        }
        CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
        CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
    }

    /* Optionally tweak the keys using multiplication. */
    if (secp256k1_rand_int(3) == 0) {
        int ret1;
        int ret2;
        unsigned char rnd[32];
        secp256k1_pubkey pubkey2;
        secp256k1_rand256_test(rnd);
        ret1 = secp256k1_ec_privkey_tweak_mul(ctx, privkey, rnd);
        ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd);
        CHECK(ret1 == ret2);
        if (ret1 == 0) {
            return;
        }
        CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
        CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
    }

    /* Sign: identical inputs give identical signatures; different extra
     * entropy must give different signatures. */
    CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
    CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1);
    CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1);
    extra[31] = 1;
    CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1);
    extra[31] = 0;
    extra[0] = 1;
    CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1);
    CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0);
    CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0);
    CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0);
    CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0);
    CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0);
    CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0);
    CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0);
    /* Verify. */
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1);
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1);
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1);
    /* Test lower-S form, malleate, verify and fail, test again, malleate again */
    CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[0]));
    secp256k1_ecdsa_signature_load(ctx, &r, &s, &signature[0]);
    /* Negating s produces the high-S (malleated) form, which must not verify. */
    secp256k1_scalar_negate(&s, &s);
    secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0);
    CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
    CHECK(secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5]));
    CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
    CHECK(!secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5]));
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
    secp256k1_scalar_negate(&s, &s);
    secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
    CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
    CHECK(memcmp(&signature[5], &signature[0], 64) == 0);

    /* Serialize/parse DER and verify again */
    CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
    memset(&signature[0], 0, sizeof(signature[0]));
    CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1);
    CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
    /* Serialize/destroy/parse DER and verify again: a corrupted byte must
     * make the signature either unparsable or fail verification. */
    siglen = 74;
    CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
    sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255);
    CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 ||
          secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0);
}
+
/* Parse randomly generated public key encodings of random sizes and types
 * (compressed 0x02/0x03, uncompressed 0x04, hybrid 0x06/0x07, plus random
 * garbage prefixes) and check that anything that parses round-trips through
 * serialization, including the X9.62 hybrid-type parity check. */
void test_random_pubkeys(void) {
    secp256k1_ge elem;
    secp256k1_ge elem2;
    unsigned char in[65];
    /* Generate some randomly sized pubkeys. */
    size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33;
    if (secp256k1_rand_bits(2) == 0) {
        /* Occasionally use a bogus length in [0, 63]. */
        len = secp256k1_rand_bits(6);
    }
    if (len == 65) {
        in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7);
    } else {
        in[0] = secp256k1_rand_bits(1) ? 2 : 3;
    }
    if (secp256k1_rand_bits(3) == 0) {
        /* Occasionally use a completely random (likely invalid) prefix byte. */
        in[0] = secp256k1_rand_bits(8);
    }
    if (len > 1) {
        secp256k1_rand256(&in[1]);
    }
    if (len > 33) {
        secp256k1_rand256(&in[33]);
    }
    if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
        unsigned char out[65];
        unsigned char firstb;
        int res;
        size_t size = len;
        firstb = in[0];
        /* If the pubkey can be parsed, it should round-trip... */
        CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33));
        CHECK(size == len);
        CHECK(memcmp(&in[1], &out[1], len-1) == 0);
        /* ... except for the type of hybrid inputs. */
        if ((in[0] != 6) && (in[0] != 7)) {
            CHECK(in[0] == out[0]);
        }
        /* Re-serialize uncompressed into in[] (overwriting the original). */
        size = 65;
        CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0));
        CHECK(size == 65);
        CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
        ge_equals_ge(&elem,&elem2);
        /* Check that the X9.62 hybrid type is checked. */
        in[0] = secp256k1_rand_bits(1) ? 6 : 7;
        res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
        if (firstb == 2 || firstb == 3) {
            /* The hybrid prefix must match the y parity implied by the
             * original compressed prefix (2->6, 3->7). */
            if (in[0] == firstb + 4) {
                CHECK(res);
            } else {
                CHECK(!res);
            }
        }
        if (res) {
            ge_equals_ge(&elem,&elem2);
            CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0));
            CHECK(memcmp(&in[1], &out[1], 64) == 0);
        }
    }
}
+
+void run_random_pubkeys(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_random_pubkeys();
+ }
+}
+
+void run_ecdsa_end_to_end(void) {
+ int i;
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_end_to_end();
+ }
+}
+
/* Parse 'sig' (siglen bytes) as a DER ECDSA signature with the strict parser,
 * the lax parser and (when ENABLE_OPENSSL_TESTS) OpenSSL, and cross-check that
 * the implementations agree on parseability, validity and re-serialization.
 * Returns 0 on success; otherwise a bitmask in which each set bit identifies
 * the particular consistency check that failed (handy when reading the hex
 * dump printed by run_ecdsa_der_parse).
 * certainly_der: caller promises the input is valid strict DER.
 * certainly_not_der: caller promises the input is NOT valid strict DER. */
int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_der, int certainly_not_der) {
    static const unsigned char zeroes[32] = {0};
#ifdef ENABLE_OPENSSL_TESTS
    /* The secp256k1 group order minus 1; r and s must compare strictly below
     * this to be accepted as scalars. */
    static const unsigned char max_scalar[32] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
        0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
        0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40
    };
#endif

    int ret = 0;

    secp256k1_ecdsa_signature sig_der;
    unsigned char roundtrip_der[2048];
    unsigned char compact_der[64];
    size_t len_der = 2048;
    int parsed_der = 0, valid_der = 0, roundtrips_der = 0;

    secp256k1_ecdsa_signature sig_der_lax;
    unsigned char roundtrip_der_lax[2048];
    unsigned char compact_der_lax[64];
    size_t len_der_lax = 2048;
    int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0;

#ifdef ENABLE_OPENSSL_TESTS
    ECDSA_SIG *sig_openssl;
    const BIGNUM *r = NULL, *s = NULL;
    const unsigned char *sigptr;
    unsigned char roundtrip_openssl[2048];
    int len_openssl = 2048;
    int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0;
#endif

    /* Strict DER parse; "valid" additionally requires both r and s nonzero. */
    parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen);
    if (parsed_der) {
        ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0;
        valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0);
    }
    if (valid_der) {
        ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1;
        roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0;
    }

    /* Same procedure with the permissive (lax) BER parser. */
    parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen);
    if (parsed_der_lax) {
        ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10;
        valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0);
    }
    if (valid_der_lax) {
        ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11;
        roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0;
    }

    /* Check the strict parser against the caller-supplied oracles. */
    if (certainly_der) {
        ret |= (!parsed_der) << 2;
    }
    if (certainly_not_der) {
        ret |= (parsed_der) << 17;
    }
    if (valid_der) {
        ret |= (!roundtrips_der) << 3;
    }

    /* Anything the strict parser accepts, the lax parser must agree on. */
    if (valid_der) {
        ret |= (!roundtrips_der_lax) << 12;
        ret |= (len_der != len_der_lax) << 13;
        ret |= (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0) << 14;
    }
    ret |= (roundtrips_der != roundtrips_der_lax) << 15;
    if (parsed_der) {
        ret |= (!parsed_der_lax) << 16;
    }

#ifdef ENABLE_OPENSSL_TESTS
    /* Cross-validate against OpenSSL's DER parser and serializer. */
    sig_openssl = ECDSA_SIG_new();
    sigptr = sig;
    parsed_openssl = (d2i_ECDSA_SIG(&sig_openssl, &sigptr, siglen) != NULL);
    if (parsed_openssl) {
        ECDSA_SIG_get0(sig_openssl, &r, &s);
        valid_openssl = !BN_is_negative(r) && !BN_is_negative(s) && BN_num_bits(r) > 0 && BN_num_bits(r) <= 256 && BN_num_bits(s) > 0 && BN_num_bits(s) <= 256;
        if (valid_openssl) {
            /* Reject r values OpenSSL accepts but that overflow a scalar. */
            unsigned char tmp[32] = {0};
            BN_bn2bin(r, tmp + 32 - BN_num_bytes(r));
            valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
        }
        if (valid_openssl) {
            /* Same overflow check for s. */
            unsigned char tmp[32] = {0};
            BN_bn2bin(s, tmp + 32 - BN_num_bytes(s));
            valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
        }
    }
    len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL);
    if (len_openssl <= 2048) {
        unsigned char *ptr = roundtrip_openssl;
        CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl);
        roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0);
    } else {
        len_openssl = 0;
    }
    ECDSA_SIG_free(sig_openssl);

    ret |= (parsed_der && !parsed_openssl) << 4;
    ret |= (valid_der && !valid_openssl) << 5;
    ret |= (roundtrips_openssl && !parsed_der) << 6;
    ret |= (roundtrips_der != roundtrips_openssl) << 7;
    if (roundtrips_openssl) {
        ret |= (len_der != (size_t)len_openssl) << 8;
        ret |= (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0) << 9;
    }
#endif
    return ret;
}
+
/* Write 'val' into ptr[0..ptrlen-1] as a big-endian integer, zero-padding any
 * bytes beyond the 4 that a uint32_t can fill. Used to emit long-form ASN.1
 * length descriptors in random_ber_signature.
 * Bug fix: 'shift' is a byte index counted from the least-significant end, so
 * the bit shift must be shift*8; the previous code shifted by 'shift' bits,
 * producing wrong length octets for any multi-byte descriptor. */
static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) {
    size_t i;
    for (i = 0; i < ptrlen; i++) {
        int shift = ptrlen - 1 - i;
        if (shift >= 4) {
            /* More significant than any byte of a 32-bit value: pad with 0. */
            ptr[i] = 0;
        } else {
            ptr[i] = (val >> (shift * 8)) & 0xFF;
        }
    }
}
+
/* Randomly corrupt a byte array in place: with probability ~1/8 each, delete
 * or insert one byte; otherwise (p=1/4) add a nonzero delta to a byte or
 * (p=1/2) flip a single bit. The RNG call sequence is part of the test
 * harness's determinism and is kept exactly as before. */
static void damage_array(unsigned char *sig, size_t *len) {
    int op = secp256k1_rand_bits(3);
    if (op < 1 && *len > 3) {
        /* Drop one byte at a random position. */
        int at = secp256k1_rand_int(*len);
        memmove(sig + at, sig + at + 1, *len - at - 1);
        *len -= 1;
    } else if (op < 2 && *len < 2048) {
        /* Splice one random byte in at a random position. */
        int at = secp256k1_rand_int(1 + *len);
        memmove(sig + at + 1, sig + at, *len - at);
        sig[at] = secp256k1_rand_bits(8);
        *len += 1;
    } else if (op < 4) {
        /* Add a random nonzero value to one byte (mod 256). */
        sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255);
    } else { /* op in 4..7 */
        /* Flip a single random bit. */
        sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3);
    }
}
+
/* Generate a pseudo-random BER-encoded ECDSA signature into sig, writing its
 * length to *len (caller's buffer must hold at least 2048 bytes; the final
 * CHECKs bound the output at 1121 bytes).
 * With probability 1/4 the output is strictly valid DER and *certainly_der is
 * set; *certainly_not_der is set whenever a deliberately non-DER construct
 * (excess zero padding, overlong length descriptors, indeterminate length,
 * embedded/trailing garbage) was emitted. When neither flag is set the caller
 * can assume nothing. The exact sequence of RNG calls determines the output
 * for a given seed, so do not reorder any secp256k1_rand_* call. */
static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly_der, int* certainly_not_der) {
    int der;
    int nlow[2], nlen[2], nlenlen[2], nhbit[2], nhbyte[2], nzlen[2];
    size_t tlen, elen, glen;
    int indet;
    int n;

    *len = 0;
    der = secp256k1_rand_bits(2) == 0;
    *certainly_der = der;
    *certainly_not_der = 0;
    /* Indeterminate-length encoding (0x80 length octet) only for non-DER. */
    indet = der ? 0 : secp256k1_rand_int(10) == 0;

    for (n = 0; n < 2; n++) {
        /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */
        nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0);
        /* The length of the number in bytes (the first byte of which will always be nonzero) */
        nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8;
        CHECK(nlen[n] <= 232);
        /* The top bit of the number. */
        nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1));
        /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */
        nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127));
        /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */
        nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8);
        if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) {
            *certainly_not_der = 1;
        }
        CHECK(nlen[n] + nzlen[n] <= 300);
        /* The length of the length descriptor for the number. 0 means short encoding, anything else is long encoding. */
        nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2);
        if (!der) {
            /* nlenlen[n] max 127 bytes */
            int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
            nlenlen[n] += add;
            if (add != 0) {
                *certainly_not_der = 1;
            }
        }
        CHECK(nlen[n] + nzlen[n] + nlenlen[n] <= 427);
    }

    /* The total length of the data to go, so far */
    tlen = 2 + nlenlen[0] + nlen[0] + nzlen[0] + 2 + nlenlen[1] + nlen[1] + nzlen[1];
    CHECK(tlen <= 856);

    /* The length of the garbage inside the tuple. */
    elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8;
    if (elen != 0) {
        *certainly_not_der = 1;
    }
    tlen += elen;
    CHECK(tlen <= 980);

    /* The length of the garbage after the end of the tuple. */
    glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8;
    if (glen != 0) {
        *certainly_not_der = 1;
    }
    CHECK(tlen + glen <= 990);

    /* Write the tuple header. */
    sig[(*len)++] = 0x30;
    if (indet) {
        /* Indeterminate length */
        sig[(*len)++] = 0x80;
        *certainly_not_der = 1;
    } else {
        int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2);
        if (!der) {
            /* Possibly make the outer length descriptor overlong too. */
            int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
            tlenlen += add;
            if (add != 0) {
                *certainly_not_der = 1;
            }
        }
        if (tlenlen == 0) {
            /* Short length notation */
            sig[(*len)++] = tlen;
        } else {
            /* Long length notation */
            sig[(*len)++] = 128 + tlenlen;
            assign_big_endian(sig + *len, tlenlen, tlen);
            *len += tlenlen;
        }
        tlen += tlenlen;
    }
    tlen += 2;
    CHECK(tlen + glen <= 1119);

    for (n = 0; n < 2; n++) {
        /* Write the integer header. */
        sig[(*len)++] = 0x02;
        if (nlenlen[n] == 0) {
            /* Short length notation */
            sig[(*len)++] = nlen[n] + nzlen[n];
        } else {
            /* Long length notation. */
            sig[(*len)++] = 128 + nlenlen[n];
            assign_big_endian(sig + *len, nlenlen[n], nlen[n] + nzlen[n]);
            *len += nlenlen[n];
        }
        /* Write zero padding */
        while (nzlen[n] > 0) {
            sig[(*len)++] = 0x00;
            nzlen[n]--;
        }
        if (nlen[n] == 32 && !nlow[n]) {
            /* Special extra 16 0xFF bytes in "high" 32-byte numbers */
            int i;
            for (i = 0; i < 16; i++) {
                sig[(*len)++] = 0xFF;
            }
            nlen[n] -= 16;
        }
        /* Write first byte of number */
        if (nlen[n] > 0) {
            sig[(*len)++] = nhbyte[n];
            nlen[n]--;
        }
        /* Generate remaining random bytes of number */
        secp256k1_rand_bytes_test(sig + *len, nlen[n]);
        *len += nlen[n];
        nlen[n] = 0;
    }

    /* Generate random garbage inside tuple. */
    secp256k1_rand_bytes_test(sig + *len, elen);
    *len += elen;

    /* Generate end-of-contents bytes. */
    if (indet) {
        sig[(*len)++] = 0;
        sig[(*len)++] = 0;
        tlen += 2;
    }
    CHECK(tlen + glen <= 1121);

    /* Generate random garbage outside tuple. */
    secp256k1_rand_bytes_test(sig + *len, glen);
    *len += glen;
    tlen += glen;
    CHECK(tlen <= 1121);
    CHECK(tlen == *len);
}
+
+void run_ecdsa_der_parse(void) {
+ int i,j;
+ for (i = 0; i < 200 * count; i++) {
+ unsigned char buffer[2048];
+ size_t buflen = 0;
+ int certainly_der = 0;
+ int certainly_not_der = 0;
+ random_ber_signature(buffer, &buflen, &certainly_der, &certainly_not_der);
+ CHECK(buflen <= 2048);
+ for (j = 0; j < 16; j++) {
+ int ret = 0;
+ if (j > 0) {
+ damage_array(buffer, &buflen);
+ /* We don't know anything anymore about the DERness of the result */
+ certainly_der = 0;
+ certainly_not_der = 0;
+ }
+ ret = test_ecdsa_der_parse(buffer, buflen, certainly_der, certainly_not_der);
+ if (ret != 0) {
+ size_t k;
+ fprintf(stderr, "Failure %x on ", ret);
+ for (k = 0; k < buflen; k++) {
+ fprintf(stderr, "%02x ", buffer[k]);
+ }
+ fprintf(stderr, "\n");
+ }
+ CHECK(ret == 0);
+ }
+ }
+}
+
/* Tests several ECDSA edge cases: degenerate r/s/message values, API argument
 * checking (illegal-callback counting), nonce-function corner cases, and
 * privkey export of the point at infinity. */
void test_ecdsa_edge_cases(void) {
    int t;
    secp256k1_ecdsa_signature sig;

    /* Test the case where ECDSA recomputes a point that is infinity. */
    {
        secp256k1_gej keyj;
        secp256k1_ge key;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        /* ss = -1^-1, sr = 1, msg = ss: chosen so verification lands on the
         * point at infinity, which must be rejected. */
        secp256k1_scalar_set_int(&ss, 1);
        secp256k1_scalar_negate(&ss, &ss);
        secp256k1_scalar_inverse(&ss, &ss);
        secp256k1_scalar_set_int(&sr, 1);
        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr);
        secp256k1_ge_set_gej(&key, &keyj);
        msg = ss;
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
    }

    /* Verify signature with r of zero fails. */
    {
        /* x-coordinate equal to the group order, so r = x mod n would be 0. */
        const unsigned char pubkey_mods_zero[33] = {
            0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0,
            0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41,
            0x41
        };
        secp256k1_ge key;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        secp256k1_scalar_set_int(&ss, 1);
        secp256k1_scalar_set_int(&msg, 0);
        secp256k1_scalar_set_int(&sr, 0);
        CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33));
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
    }

    /* Verify signature with s of zero fails. */
    {
        const unsigned char pubkey[33] = {
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x01
        };
        secp256k1_ge key;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        secp256k1_scalar_set_int(&ss, 0);
        secp256k1_scalar_set_int(&msg, 0);
        secp256k1_scalar_set_int(&sr, 1);
        CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
    }

    /* Verify signature with message 0 passes. */
    {
        const unsigned char pubkey[33] = {
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x02
        };
        const unsigned char pubkey2[33] = {
            0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0,
            0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41,
            0x43
        };
        secp256k1_ge key;
        secp256k1_ge key2;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        secp256k1_scalar_set_int(&ss, 2);
        secp256k1_scalar_set_int(&msg, 0);
        secp256k1_scalar_set_int(&sr, 2);
        CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
        CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33));
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
        /* Negating s must not change the verification result here. */
        secp256k1_scalar_negate(&ss, &ss);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
        secp256k1_scalar_set_int(&ss, 1);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0);
    }

    /* Verify signature with message 1 passes. */
    {
        const unsigned char pubkey[33] = {
            0x02, 0x14, 0x4e, 0x5a, 0x58, 0xef, 0x5b, 0x22,
            0x6f, 0xd2, 0xe2, 0x07, 0x6a, 0x77, 0xcf, 0x05,
            0xb4, 0x1d, 0xe7, 0x4a, 0x30, 0x98, 0x27, 0x8c,
            0x93, 0xe6, 0xe6, 0x3c, 0x0b, 0xc4, 0x73, 0x76,
            0x25
        };
        const unsigned char pubkey2[33] = {
            0x02, 0x8a, 0xd5, 0x37, 0xed, 0x73, 0xd9, 0x40,
            0x1d, 0xa0, 0x33, 0xd2, 0xdc, 0xf0, 0xaf, 0xae,
            0x34, 0xcf, 0x5f, 0x96, 0x4c, 0x73, 0x28, 0x0f,
            0x92, 0xc0, 0xf6, 0x9d, 0xd9, 0xb2, 0x09, 0x10,
            0x62
        };
        const unsigned char csr[32] = {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
            0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
            0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb
        };
        secp256k1_ge key;
        secp256k1_ge key2;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        secp256k1_scalar_set_int(&ss, 1);
        secp256k1_scalar_set_int(&msg, 1);
        secp256k1_scalar_set_b32(&sr, csr, NULL);
        CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
        CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33));
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
        secp256k1_scalar_negate(&ss, &ss);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
        /* An unrelated s (2^-1) must fail. */
        secp256k1_scalar_set_int(&ss, 2);
        secp256k1_scalar_inverse_var(&ss, &ss);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0);
    }

    /* Verify signature with message -1 passes. */
    {
        const unsigned char pubkey[33] = {
            0x03, 0xaf, 0x97, 0xff, 0x7d, 0x3a, 0xf6, 0xa0,
            0x02, 0x94, 0xbd, 0x9f, 0x4b, 0x2e, 0xd7, 0x52,
            0x28, 0xdb, 0x49, 0x2a, 0x65, 0xcb, 0x1e, 0x27,
            0x57, 0x9c, 0xba, 0x74, 0x20, 0xd5, 0x1d, 0x20,
            0xf1
        };
        const unsigned char csr[32] = {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
            0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
            0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee
        };
        secp256k1_ge key;
        secp256k1_scalar msg;
        secp256k1_scalar sr, ss;
        secp256k1_scalar_set_int(&ss, 1);
        secp256k1_scalar_set_int(&msg, 1);
        secp256k1_scalar_negate(&msg, &msg);
        secp256k1_scalar_set_b32(&sr, csr, NULL);
        CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        secp256k1_scalar_negate(&ss, &ss);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
        secp256k1_scalar_set_int(&ss, 3);
        secp256k1_scalar_inverse_var(&ss, &ss);
        CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
    }

    /* Signature where s would be zero. */
    {
        secp256k1_pubkey pubkey;
        size_t siglen;
        int32_t ecount;
        unsigned char signature[72];
        static const unsigned char nonce[32] = {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
        };
        /* nonce2 is the group order minus 1. */
        static const unsigned char nonce2[32] = {
            0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
            0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
            0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
            0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x40
        };
        const unsigned char key[32] = {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
        };
        unsigned char msg[32] = {
            0x86, 0x41, 0x99, 0x81, 0x06, 0x23, 0x44, 0x53,
            0xaa, 0x5f, 0x9d, 0x6a, 0x31, 0x78, 0xf4, 0xf7,
            0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62,
            0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9,
        };
        /* ecount counts illegal-argument callbacks; each NULL-argument call
         * below is expected to bump it by exactly one. */
        ecount = 0;
        secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0);
        msg[31] = 0xaa;
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1);
        CHECK(ecount == 0);
        CHECK(secp256k1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0);
        CHECK(ecount == 1);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0);
        CHECK(ecount == 2);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0);
        CHECK(ecount == 3);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1);
        CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, key) == 1);
        CHECK(secp256k1_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0);
        CHECK(ecount == 4);
        CHECK(secp256k1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0);
        CHECK(ecount == 5);
        CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, NULL) == 0);
        CHECK(ecount == 6);
        CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1);
        CHECK(ecount == 6);
        CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
        CHECK(ecount == 7);
        /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */
        CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0);
        CHECK(ecount == 8);
        siglen = 72;
        CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0);
        CHECK(ecount == 9);
        CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0);
        CHECK(ecount == 10);
        CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0);
        CHECK(ecount == 11);
        CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1);
        CHECK(ecount == 11);
        CHECK(secp256k1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0);
        CHECK(ecount == 12);
        CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0);
        CHECK(ecount == 13);
        CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1);
        CHECK(ecount == 13);
        siglen = 10;
        /* Too little room for a signature does not fail via ARGCHECK. */
        CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0);
        CHECK(ecount == 13);
        ecount = 0;
        CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0);
        CHECK(ecount == 1);
        CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0);
        CHECK(ecount == 2);
        CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0);
        CHECK(ecount == 3);
        CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1);
        CHECK(ecount == 3);
        CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0);
        CHECK(ecount == 4);
        CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0);
        CHECK(ecount == 5);
        CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1);
        CHECK(ecount == 5);
        /* All-0xFF r/s overflow the scalar range: parse fails but not via ARGCHECK. */
        memset(signature, 255, 64);
        CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0);
        CHECK(ecount == 5);
        secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
    }

    /* Nonce function corner cases. */
    for (t = 0; t < 2; t++) {
        static const unsigned char zero[32] = {0x00};
        int i;
        unsigned char key[32];
        unsigned char msg[32];
        secp256k1_ecdsa_signature sig2;
        secp256k1_scalar sr[512], ss;
        const unsigned char *extra;
        /* Run once with no extra entropy, once with a zero extra-entropy blob. */
        extra = t == 0 ? NULL : zero;
        memset(msg, 0, 32);
        msg[31] = 1;
        /* High key results in signature failure. */
        memset(key, 0xFF, 32);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
        CHECK(is_empty_signature(&sig));
        /* Zero key results in signature failure. */
        memset(key, 0, 32);
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
        CHECK(is_empty_signature(&sig));
        /* Nonce function failure results in signature failure. */
        key[31] = 1;
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0);
        CHECK(is_empty_signature(&sig));
        /* The retry loop successfully makes its way to the first good value. */
        CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1);
        CHECK(!is_empty_signature(&sig));
        CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1);
        CHECK(!is_empty_signature(&sig2));
        CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
        /* The default nonce function is deterministic. */
        CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
        CHECK(!is_empty_signature(&sig2));
        CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
        /* The default nonce function changes output with different messages. */
        for(i = 0; i < 256; i++) {
            int j;
            msg[0] = i;
            CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
            CHECK(!is_empty_signature(&sig2));
            secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
            /* All r values produced so far must be pairwise distinct. */
            for (j = 0; j < i; j++) {
                CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
            }
        }
        msg[0] = 0;
        msg[31] = 2;
        /* The default nonce function changes output with different keys. */
        for(i = 256; i < 512; i++) {
            int j;
            key[0] = i - 256;
            CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
            CHECK(!is_empty_signature(&sig2));
            secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
            for (j = 0; j < i; j++) {
                CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
            }
        }
        key[0] = 0;
    }

    {
        /* Check that optional nonce arguments do not have equivalent effect. */
        const unsigned char zeros[32] = {0};
        unsigned char nonce[32];
        unsigned char nonce2[32];
        unsigned char nonce3[32];
        unsigned char nonce4[32];
        VG_UNDEF(nonce,32);
        VG_UNDEF(nonce2,32);
        VG_UNDEF(nonce3,32);
        VG_UNDEF(nonce4,32);
        /* The four algo16/data argument combinations must all succeed... */
        CHECK(nonce_function_rfc6979(nonce, zeros, zeros, NULL, NULL, 0) == 1);
        VG_CHECK(nonce,32);
        CHECK(nonce_function_rfc6979(nonce2, zeros, zeros, zeros, NULL, 0) == 1);
        VG_CHECK(nonce2,32);
        CHECK(nonce_function_rfc6979(nonce3, zeros, zeros, NULL, (void *)zeros, 0) == 1);
        VG_CHECK(nonce3,32);
        CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1);
        VG_CHECK(nonce4,32);
        /* ...and yield four pairwise-distinct nonces. */
        CHECK(memcmp(nonce, nonce2, 32) != 0);
        CHECK(memcmp(nonce, nonce3, 32) != 0);
        CHECK(memcmp(nonce, nonce4, 32) != 0);
        CHECK(memcmp(nonce2, nonce3, 32) != 0);
        CHECK(memcmp(nonce2, nonce4, 32) != 0);
        CHECK(memcmp(nonce3, nonce4, 32) != 0);
    }


    /* Privkey export where pubkey is the point at infinity. */
    {
        unsigned char privkey[300];
        /* seckey is the group order itself, i.e. an invalid (zero) scalar. */
        unsigned char seckey[32] = {
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
            0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
            0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41,
        };
        size_t outlen = 300;
        CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 0));
        outlen = 300;
        CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 1));
    }
}
+
/* Harness entry point: the edge-case battery is deterministic, so it runs
 * exactly once regardless of the global iteration count. */
void run_ecdsa_edge_cases(void) {
    test_ecdsa_edge_cases();
}
+
+#ifdef ENABLE_OPENSSL_TESTS
/* Convert a 32-byte secret key into an OpenSSL EC_KEY by exporting it as DER
 * (randomly choosing compressed or uncompressed public-key encoding) and
 * re-importing it with d2i_ECPrivateKey. The caller owns the returned key
 * and must release it with EC_KEY_free. */
EC_KEY *get_openssl_key(const unsigned char *key32) {
    unsigned char privkey[300];
    size_t privkeylen;  /* set by ec_privkey_export_der below */
    const unsigned char* pbegin = privkey;
    int compr = secp256k1_rand_bits(1);
    EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1);
    CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr));
    CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen));
    CHECK(EC_KEY_check_key(ec_key));
    return ec_key;
}
+
/* Cross-validate ECDSA against OpenSSL in both directions:
 * 1) sign with OpenSSL, verify with libsecp256k1 (and check that a tweaked
 *    message fails);
 * 2) sign with libsecp256k1, serialize to DER, verify with OpenSSL. */
void test_ecdsa_openssl(void) {
    secp256k1_gej qj;
    secp256k1_ge q;
    secp256k1_scalar sigr, sigs;
    secp256k1_scalar one;
    secp256k1_scalar msg2;
    secp256k1_scalar key, msg;
    EC_KEY *ec_key;
    unsigned int sigsize = 80;
    size_t secp_sigsize = 80;
    unsigned char message[32];
    unsigned char signature[80];
    unsigned char key32[32];
    secp256k1_rand256_test(message);
    secp256k1_scalar_set_b32(&msg, message, NULL);
    random_scalar_order_test(&key);
    secp256k1_scalar_get_b32(key32, &key);
    /* Derive the public point q = key*G for local verification. */
    secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key);
    secp256k1_ge_set_gej(&q, &qj);
    ec_key = get_openssl_key(key32);
    CHECK(ec_key != NULL);
    /* Direction 1: OpenSSL signs, we verify. */
    CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key));
    CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize));
    CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg));
    /* A signature over msg must not verify for msg+1. */
    secp256k1_scalar_set_int(&one, 1);
    secp256k1_scalar_add(&msg2, &msg, &one);
    CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2));

    /* Direction 2: we sign, OpenSSL verifies. */
    random_sign(&sigr, &sigs, &key, &msg, NULL);
    CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs));
    CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1);

    EC_KEY_free(ec_key);
}
+
+void run_ecdsa_openssl(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_openssl();
+ }
+}
+#endif
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_GENERATOR
+# include "modules/generator/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_COMMITMENT
+# include "modules/commitment/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RANGEPROOF
+# include "modules/rangeproof/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_BULLETPROOF
+# include "modules/bulletproofs/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_WHITELIST
+# include "modules/whitelist/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SURJECTIONPROOF
+# include "modules/surjection/tests_impl.h"
+#endif
+
+int main(int argc, char **argv) {
+ unsigned char seed16[16] = {0};
+ unsigned char run32[32] = {0};
+ /* find iteration count */
+ if (argc > 1) {
+ count = strtol(argv[1], NULL, 0);
+ }
+
+ /* find random seed */
+ if (argc > 2) {
+ int pos = 0;
+ const char* ch = argv[2];
+ while (pos < 16 && ch[0] != 0 && ch[1] != 0) {
+ unsigned short sh;
+ if (sscanf(ch, "%2hx", &sh)) {
+ seed16[pos] = sh;
+ } else {
+ break;
+ }
+ ch += 2;
+ pos++;
+ }
+ } else {
+ FILE *frand = fopen("/dev/urandom", "r");
+ if ((frand == NULL) || fread(&seed16, sizeof(seed16), 1, frand) != sizeof(seed16)) {
+ uint64_t t = time(NULL) * (uint64_t)1337;
+ seed16[0] ^= t;
+ seed16[1] ^= t >> 8;
+ seed16[2] ^= t >> 16;
+ seed16[3] ^= t >> 24;
+ seed16[4] ^= t >> 32;
+ seed16[5] ^= t >> 40;
+ seed16[6] ^= t >> 48;
+ seed16[7] ^= t >> 56;
+ }
+ if (frand) {
+ fclose(frand);
+ }
+ }
+ secp256k1_rand_seed(seed16);
+
+ printf("test count = %i\n", count);
+ printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+
+ /* initialize */
+ run_context_tests();
+ run_scratch_tests();
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ if (secp256k1_rand_bits(1)) {
+ secp256k1_rand256(run32);
+ CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL));
+ }
+
+ run_rand_bits();
+ run_rand_int();
+ run_util_tests();
+
+ run_sha256_tests();
+ run_hmac_sha256_tests();
+ run_rfc6979_hmac_sha256_tests();
+
+#ifndef USE_NUM_NONE
+ /* num tests */
+ run_num_smalltests();
+#endif
+
+ /* scalar tests */
+ run_scalar_tests();
+
+ /* field tests */
+ run_field_inv();
+ run_field_inv_var();
+ run_field_inv_all_var();
+ run_field_misc();
+ run_field_convert();
+ run_sqr();
+ run_sqrt();
+
+ /* group tests */
+ run_ge();
+ run_group_decompress();
+
+ /* ecmult tests */
+ run_wnaf();
+ run_point_times_order();
+ run_ecmult_chain();
+ run_ecmult_constants();
+ run_ecmult_gen_blind();
+ run_ecmult_const_tests();
+ run_ecmult_multi_tests();
+ run_ec_combine();
+
+ /* endomorphism tests */
+#ifdef USE_ENDOMORPHISM
+ run_endomorphism_tests();
+#endif
+
+ /* EC point parser test */
+ run_ec_pubkey_parse_test();
+
+ /* EC key edge cases */
+ run_eckey_edge_case_test();
+
+#ifdef ENABLE_MODULE_ECDH
+ /* ecdh tests */
+ run_ecdh_tests();
+#endif
+
+ /* ecdsa tests */
+ run_random_pubkeys();
+ run_ecdsa_der_parse();
+ run_ecdsa_sign_verify();
+ run_ecdsa_end_to_end();
+ run_ecdsa_edge_cases();
+#ifdef ENABLE_OPENSSL_TESTS
+ run_ecdsa_openssl();
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+ /* ECDSA pubkey recovery tests */
+ run_recovery_tests();
+#endif
+
+#ifdef ENABLE_MODULE_GENERATOR
+ run_generator_tests();
+#endif
+
+#ifdef ENABLE_MODULE_RANGEPROOF
+ run_rangeproof_tests();
+#endif
+
+#ifdef ENABLE_MODULE_BULLETPROOF
+ run_bulletproofs_tests();
+#endif
+
+#ifdef ENABLE_MODULE_WHITELIST
+ /* Key whitelisting tests */
+ run_whitelist_tests();
+#endif
+
+#ifdef ENABLE_MODULE_SURJECTIONPROOF
+ run_surjection_tests();
+#endif
+
+ secp256k1_rand256(run32);
+ printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
+
+ /* shutdown */
+ secp256k1_context_destroy(ctx);
+
+ printf("no problems found\n");
+ return 0;
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c
new file mode 100644
index 0000000..ab9779b
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c
@@ -0,0 +1,511 @@
+/***********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <time.h>
+
+#undef USE_ECMULT_STATIC_PRECOMPUTATION
+
+#ifndef EXHAUSTIVE_TEST_ORDER
+/* see group_impl.h for allowable values */
+#define EXHAUSTIVE_TEST_ORDER 13
+#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
+#endif
+
+#include "include/secp256k1.h"
+#include "group.h"
+#include "secp256k1.c"
+#include "testrand_impl.h"
+
+#ifdef ENABLE_MODULE_RECOVERY
+#include "src/modules/recovery/main_impl.h"
+#include "include/secp256k1_recovery.h"
+#endif
+
+/** stolen from tests.c */
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+ CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+}
+
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+ secp256k1_fe z2s;
+ secp256k1_fe u1, u2, s1, s2;
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
+ secp256k1_fe_sqr(&z2s, &b->z);
+ secp256k1_fe_mul(&u1, &a->x, &z2s);
+ u2 = b->x; secp256k1_fe_normalize_weak(&u2);
+ secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
+ s2 = b->y; secp256k1_fe_normalize_weak(&s2);
+ CHECK(secp256k1_fe_equal_var(&u1, &u2));
+ CHECK(secp256k1_fe_equal_var(&s1, &s2));
+}
+
+void random_fe(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+/** END stolen from tests.c */
+
+int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+ const unsigned char *key32, const unsigned char *algo16,
+ void *data, unsigned int attempt) {
+ secp256k1_scalar s;
+ int *idata = data;
+ (void)msg32;
+ (void)key32;
+ (void)algo16;
+ /* Some nonces cannot be used because they'd cause s and/or r to be zero.
+ * The signing function has retry logic here that just re-calls the nonce
+ * function with an increased `attempt`. So if attempt > 0 this means we
+ * need to change the nonce to avoid an infinite loop. */
+ if (attempt > 0) {
+ *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
+ }
+ secp256k1_scalar_set_int(&s, *idata);
+ secp256k1_scalar_get_b32(nonce32, &s);
+ return 1;
+}
+
+#ifdef USE_ENDOMORPHISM
+void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+ int i;
+ for (i = 0; i < order; i++) {
+ secp256k1_ge res;
+ secp256k1_ge_mul_lambda(&res, &group[i]);
+ ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
+ }
+}
+#endif
+
+void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j;
+
+ /* Sanity-check (and check infinity functions) */
+ CHECK(secp256k1_ge_is_infinity(&group[0]));
+ CHECK(secp256k1_gej_is_infinity(&groupj[0]));
+ for (i = 1; i < order; i++) {
+ CHECK(!secp256k1_ge_is_infinity(&group[i]));
+ CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
+ }
+
+ /* Check all addition formulae */
+ for (j = 0; j < order; j++) {
+ secp256k1_fe fe_inv;
+ secp256k1_fe_inv(&fe_inv, &groupj[j].z);
+ for (i = 0; i < order; i++) {
+ secp256k1_ge zless_gej;
+ secp256k1_gej tmp;
+ /* add_var */
+ secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_ge */
+ if (j > 0) {
+ secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ /* add_ge_var */
+ secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_zinv_var */
+ zless_gej.infinity = groupj[j].infinity;
+ zless_gej.x = groupj[j].x;
+ zless_gej.y = groupj[j].y;
+ secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ }
+
+ /* Check doubling */
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ if (i > 0) {
+ secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+ secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+
+ /* Check negation */
+ for (i = 1; i < order; i++) {
+ secp256k1_ge tmp;
+ secp256k1_gej tmpj;
+ secp256k1_ge_neg(&tmp, &group[i]);
+ ge_equals_ge(&group[order - i], &tmp);
+ secp256k1_gej_neg(&tmpj, &groupj[i]);
+ ge_equals_gej(&group[order - i], &tmpj);
+ }
+}
+
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j, r_log;
+ for (r_log = 1; r_log < order; r_log++) {
+ for (j = 0; j < order; j++) {
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ secp256k1_scalar na, ng;
+ secp256k1_scalar_set_int(&na, i);
+ secp256k1_scalar_set_int(&ng, j);
+
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
+ ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
+
+ if (i > 0) {
+ secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
+ ge_equals_gej(&group[(i * j) % order], &tmp);
+ }
+ }
+ }
+ }
+}
+
+typedef struct {
+ secp256k1_scalar sc[2];
+ secp256k1_ge pt[2];
+} ecmult_multi_data;
+
+static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+ ecmult_multi_data *data = (ecmult_multi_data*) cbdata;
+ *sc = data->sc[idx];
+ *pt = data->pt[idx];
+ return 1;
+}
+
+void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k, x, y;
+ secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
+ for (i = 0; i < order; i++) {
+ for (j = 0; j < order; j++) {
+ for (k = 0; k < order; k++) {
+ for (x = 0; x < order; x++) {
+ for (y = 0; y < order; y++) {
+ secp256k1_gej tmp;
+ secp256k1_scalar g_sc;
+ ecmult_multi_data data;
+
+ secp256k1_scalar_set_int(&data.sc[0], i);
+ secp256k1_scalar_set_int(&data.sc[1], j);
+ secp256k1_scalar_set_int(&g_sc, k);
+ data.pt[0] = group[x];
+ data.pt[1] = group[y];
+
+ secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
+ ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
+ }
+ }
+ }
+ }
+ }
+ secp256k1_scratch_destroy(scratch);
+}
+
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+ secp256k1_fe x;
+ unsigned char x_bin[32];
+ k %= EXHAUSTIVE_TEST_ORDER;
+ x = group[k].x;
+ secp256k1_fe_normalize(&x);
+ secp256k1_fe_get_b32(x_bin, &x);
+ secp256k1_scalar_set_b32(r, x_bin, NULL);
+}
+
+void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int s, r, msg, key;
+ for (s = 1; s < order; s++) {
+ for (r = 1; r < order; r++) {
+ for (msg = 1; msg < order; msg++) {
+ for (key = 1; key < order; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < order; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* Verify by calling verify */
+ secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k;
+
+ /* Loop */
+ for (i = 1; i < order; i++) { /* message */
+ for (j = 1; j < order; j++) { /* key */
+ for (k = 1; k < order; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+
+ /* We would like to verify zero-knowledge here by counting how often every
+ * possible (s, r) tuple appears, but because the group order is larger
+ * than the field order, when coercing the x-values to scalar values, some
+ * appear more often than others, so we are actually not zero-knowledge.
+ * (This effect also appears in the real code, but the difference is on the
+ * order of 1/2^128th the field order, so the deviation is not useful to a
+ * computationally bounded attacker.)
+ */
+}
+
+#ifdef ENABLE_MODULE_RECOVERY
+void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k;
+
+ /* Loop */
+ for (i = 1; i < order; i++) { /* message */
+ for (j = 1; j < order; j++) { /* key */
+ for (k = 1; k < order; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_fe r_dot_y_normalized;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ int expected_recid;
+ int recid;
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ /* Check directly */
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+ /* In computing the recid, there is an overflow condition that is disabled in
+ * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
+ * will exceed the group order, and our signing code always holds out for r
+ * values that don't overflow, so with a proper overflow check the tests would
+ * loop indefinitely. */
+ r_dot_y_normalized = group[k].y;
+ secp256k1_fe_normalize(&r_dot_y_normalized);
+ /* Also the recovery id is flipped depending if we hit the low-s branch */
+ if ((k * s) % order == (i + r * j) % order) {
+ expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
+ } else {
+ expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
+ }
+ CHECK(recid == expected_recid);
+
+ /* Convert to a standard sig then check */
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ /* This is essentially a copy of test_exhaustive_verify, with recovery added */
+ int s, r, msg, key;
+ for (s = 1; s < order; s++) {
+ for (r = 1; r < order; r++) {
+ for (msg = 1; msg < order; msg++) {
+ for (key = 1; key < order; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int recid = 0;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < order; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* We would like to try recovering the pubkey and checking that it matches,
+ * but pubkey recovery is impossible in the exhaustive tests (the reason
+ * being that there are 12 nonzero r values, 12 nonzero points, and no
+ * overlap between the sets, so there are no valid signatures). */
+
+ /* Verify by converting to a standard signature and calling verify */
+ secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+#endif
+
+int main(void) {
+ int i;
+ secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+ secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+
+ /* Build context */
+ secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ /* TODO set z = 1, then do num_tests runs with random z values */
+
+ /* Generate the entire group */
+ secp256k1_gej_set_infinity(&groupj[0]);
+ secp256k1_ge_set_gej(&group[0], &groupj[0]);
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ /* Set a different random z-value for each Jacobian point */
+ secp256k1_fe z;
+ random_fe(&z);
+
+ secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+ secp256k1_ge_set_gej(&group[i], &groupj[i]);
+ secp256k1_gej_rescale(&groupj[i], &z);
+
+ /* Verify against ecmult_gen */
+ {
+ secp256k1_scalar scalar_i;
+ secp256k1_gej generatedj;
+ secp256k1_ge generated;
+
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+ secp256k1_ge_set_gej(&generated, &generatedj);
+
+ CHECK(group[i].infinity == 0);
+ CHECK(generated.infinity == 0);
+ CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+ CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+ }
+ }
+
+ /* Run the tests */
+#ifdef USE_ENDOMORPHISM
+ test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
+#endif
+ test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+
+#ifdef ENABLE_MODULE_RECOVERY
+ test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+#endif
+
+ secp256k1_context_destroy(ctx);
+ return 0;
+}
+
diff --git a/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/util.h b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/util.h
new file mode 100644
index 0000000..aca79d7
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/libsecp256k1/src/util.h
@@ -0,0 +1,147 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_UTIL_H
+#define SECP256K1_UTIL_H
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef struct {
+ void (*fn)(const char *text, void* data);
+ const void* data;
+} secp256k1_callback;
+
+static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) {
+ cb->fn(text, (void*)cb->data);
+}
+
+#ifdef DETERMINISTIC
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s\n", msg); \
+ abort(); \
+} while(0);
+#else
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \
+ abort(); \
+} while(0)
+#endif
+
+#ifdef HAVE_BUILTIN_EXPECT
+#define EXPECT(x,c) __builtin_expect((x),(c))
+#else
+#define EXPECT(x,c) (x)
+#endif
+
+#ifdef DETERMINISTIC
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed"); \
+ } \
+} while(0)
+#else
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed: " #cond); \
+ } \
+} while(0)
+#endif
+
+/* Like assert(), but when VERIFY is defined, and side-effect safe. */
+#if defined(COVERAGE)
+#define VERIFY_CHECK(check)
+#define VERIFY_SETUP(stmt)
+#elif defined(VERIFY)
+#define VERIFY_CHECK CHECK
+#define VERIFY_SETUP(stmt) do { stmt; } while(0)
+#else
+#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
+#define VERIFY_SETUP(stmt)
+#endif
+
+static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
+ void *ret = malloc(size);
+ if (ret == NULL) {
+ secp256k1_callback_call(cb, "Out of memory");
+ }
+ return ret;
+}
+
+static SECP256K1_INLINE void *checked_realloc(const secp256k1_callback* cb, void *ptr, size_t size) {
+ void *ret = realloc(ptr, size);
+ if (ret == NULL) {
+ secp256k1_callback_call(cb, "Out of memory");
+ }
+ return ret;
+}
+
+/* Extract the sign of an int64, take the abs and return a uint64, constant time. */
+SECP256K1_INLINE static int secp256k1_sign_and_abs64(uint64_t *out, int64_t in) {
+ uint64_t mask0, mask1;
+ int ret;
+ ret = in < 0;
+ mask0 = ret + ~((uint64_t)0);
+ mask1 = ~mask0;
+ *out = (uint64_t)in;
+ *out = (*out & mask0) | ((~*out + 1) & mask1);
+ return ret;
+}
+
+SECP256K1_INLINE static int secp256k1_clz64_var(uint64_t x) {
+ int ret;
+ if (!x) {
+ return 64;
+ }
+# if defined(HAVE_BUILTIN_CLZLL)
+ ret = __builtin_clzll(x);
+# else
+ /*FIXME: debruijn fallback. */
+ for (ret = 0; ((x & (1ULL << 63)) == 0); x <<= 1, ret++);
+# endif
+ return ret;
+}
+
+/* Macro for restrict, when available and not in a VERIFY build. */
+#if defined(SECP256K1_BUILD) && defined(VERIFY)
+# define SECP256K1_RESTRICT
+#else
+# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
+# if SECP256K1_GNUC_PREREQ(3,0)
+# define SECP256K1_RESTRICT __restrict__
+# elif (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define SECP256K1_RESTRICT __restrict
+# else
+# define SECP256K1_RESTRICT
+# endif
+# else
+# define SECP256K1_RESTRICT restrict
+# endif
+#endif
+
+#if defined(_WIN32)
+# define I64FORMAT "I64d"
+# define I64uFORMAT "I64u"
+#else
+# define I64FORMAT "lld"
+# define I64uFORMAT "llu"
+#endif
+
+#if defined(HAVE___INT128)
+# if defined(__GNUC__)
+# define SECP256K1_GNUC_EXT __extension__
+# else
+# define SECP256K1_GNUC_EXT
+# endif
+SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+#endif
+
+#endif /* SECP256K1_UTIL_H */
diff --git a/src/ConfidentialTx/crypto/secp256k1/panic_cb.go b/src/ConfidentialTx/crypto/secp256k1/panic_cb.go
new file mode 100644
index 0000000..e0e9034
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/panic_cb.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package secp256k1
+
+import "C"
+import "unsafe"
+
+// Callbacks for converting libsecp256k1 internal faults into
+// recoverable Go panics.
+
+//export secp256k1GoPanicIllegal
+func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) {
+ panic("illegal argument: " + C.GoString(msg))
+}
+
+//export secp256k1GoPanicError
+func secp256k1GoPanicError(msg *C.char, data unsafe.Pointer) {
+ panic("internal error: " + C.GoString(msg))
+}
diff --git a/src/ConfidentialTx/crypto/secp256k1/secp256.go b/src/ConfidentialTx/crypto/secp256k1/secp256.go
new file mode 100644
index 0000000..e72f524
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/secp256.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package secp256k1 wraps the bitcoin secp256k1 C library.
+package secp256k1
+
+/*
+#cgo CFLAGS: -I./libsecp256k1
+#cgo CFLAGS: -I./libsecp256k1/src/
+#define USE_NUM_NONE
+#define USE_FIELD_10X26
+#define USE_FIELD_INV_BUILTIN
+#define USE_SCALAR_8X32
+#define USE_SCALAR_INV_BUILTIN
+#define NDEBUG
+#define ENABLE_MODULE_GENERATOR
+#define ENABLE_MODULE_BULLETPROOF
+#define SECP256K1_MODULE_BULLETPROOF_TESTS
+#include "./libsecp256k1/src/secp256k1.c"
+#include "./libsecp256k1/src/modules/recovery/main_impl.h"
+#include "./libsecp256k1/src/bench_bulletproof.c"
+#include "./libsecp256k1/src/modules/bulletproofs/tests_impl.h"
+#include "ext.h"
+
+typedef void (*callbackFunc) (const char* msg, void* data);
+extern void secp256k1GoPanicIllegal(const char* msg, void* data);
+extern void secp256k1GoPanicError(const char* msg, void* data);
+extern void test_rangeproof();
+extern void setup_rangeproof(zkrp_t *dt);
+extern void commit_rangeproof(zkrp_t *dt);
+extern void prove_rangeproof(zkrp_t *dt);
+extern int verify_rangeproof(zkrp_t *dt);
+*/
+import "C"
+
+import (
+ "errors"
+ "unsafe"
+)
+
+var context *C.secp256k1_context
+
+func init() {
+ // around 20 ms on a modern CPU.
+ context = C.secp256k1_context_create_sign_verify()
+ C.secp256k1_context_set_illegal_callback(context, C.callbackFunc(C.secp256k1GoPanicIllegal), nil)
+ C.secp256k1_context_set_error_callback(context, C.callbackFunc(C.secp256k1GoPanicError), nil)
+}
+
+var (
+ ErrInvalidMsgLen = errors.New("invalid message length, need 32 bytes")
+ ErrInvalidSignatureLen = errors.New("invalid signature length")
+ ErrInvalidRecoveryID = errors.New("invalid signature recovery id")
+ ErrInvalidKey = errors.New("invalid private key")
+ ErrSignFailed = errors.New("signing failed")
+ ErrRecoverFailed = errors.New("recovery failed")
+)
+
+// Sign creates a recoverable ECDSA signature.
+// The produced signature is in the 65-byte [R || S || V] format where V is 0 or 1.
+//
+// The caller is responsible for ensuring that msg cannot be chosen
+// directly by an attacker. It is usually preferable to use a cryptographic
+// hash function on any input before handing it to this function.
+func Sign(msg []byte, seckey []byte) ([]byte, error) {
+ if len(msg) != 32 {
+ return nil, ErrInvalidMsgLen
+ }
+ if len(seckey) != 32 {
+ return nil, ErrInvalidKey
+ }
+ seckeydata := (*C.uchar)(unsafe.Pointer(&seckey[0]))
+ if C.secp256k1_ec_seckey_verify(context, seckeydata) != 1 {
+ return nil, ErrInvalidKey
+ }
+
+ var (
+ msgdata = (*C.uchar)(unsafe.Pointer(&msg[0]))
+ noncefunc = C.secp256k1_nonce_function_rfc6979
+ sigstruct C.secp256k1_ecdsa_recoverable_signature
+ )
+ if C.secp256k1_ecdsa_sign_recoverable(context, &sigstruct, msgdata, seckeydata, noncefunc, nil) == 0 {
+ return nil, ErrSignFailed
+ }
+
+ var (
+ sig = make([]byte, 65)
+ sigdata = (*C.uchar)(unsafe.Pointer(&sig[0]))
+ recid C.int
+ )
+ C.secp256k1_ecdsa_recoverable_signature_serialize_compact(context, sigdata, &recid, &sigstruct)
+ sig[64] = byte(recid) // add back recid to get 65 bytes sig
+ return sig, nil
+}
+
+// RecoverPubkey returns the the public key of the signer.
+// msg must be the 32-byte hash of the message to be signed.
+// sig must be a 65-byte compact ECDSA signature containing the
+// recovery id as the last element.
+func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
+ if len(msg) != 32 {
+ return nil, ErrInvalidMsgLen
+ }
+ if err := checkSignature(sig); err != nil {
+ return nil, err
+ }
+
+ var (
+ pubkey = make([]byte, 65)
+ sigdata = (*C.uchar)(unsafe.Pointer(&sig[0]))
+ msgdata = (*C.uchar)(unsafe.Pointer(&msg[0]))
+ )
+ if C.secp256k1_ecdsa_recover_pubkey(context, (*C.uchar)(unsafe.Pointer(&pubkey[0])), sigdata, msgdata) == 0 {
+ return nil, ErrRecoverFailed
+ }
+ return pubkey, nil
+}
+
+func checkSignature(sig []byte) error {
+ if len(sig) != 65 {
+ return ErrInvalidSignatureLen
+ }
+ if sig[64] >= 4 {
+ return ErrInvalidRecoveryID
+ }
+ return nil
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+// Rangeproofs functions: should be separated from this file in the near future
+////////////////////////////////////////////////////////////////////////////////////
+
+/*func RunRangeproof() {
+ C.test_rangeproof()
+}
+
+func SetupRangeproof(nbits _Ctype_ulong) (*C.zkrp_t) {
+ var dt *C.zkrp_t
+ dt = new(C.zkrp_t)
+ dt.nbits = nbits
+ C.setup_rangeproof(dt)
+ return dt
+}
+
+func PrintRangeproof(dt *C.zkrp_t) {
+ C.myprint(C.CString("GO print"), dt)
+}
+
+func CommitRangeproof(dt *C.zkrp_t) {
+ C.commit_rangeproof(dt)
+}
+
+func ProveRangeproof(dt *C.zkrp_t) {
+ C.prove_rangeproof(dt)
+}
+
+func VerifyRangeproof(dt *C.zkrp_t) (bool) {
+ return (C.verify_rangeproof(dt) == 1)
+}*/
diff --git a/src/ConfidentialTx/crypto/secp256k1/secp256_test.go b/src/ConfidentialTx/crypto/secp256k1/secp256_test.go
new file mode 100644
index 0000000..7877855
--- /dev/null
+++ b/src/ConfidentialTx/crypto/secp256k1/secp256_test.go
@@ -0,0 +1,263 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package secp256k1
+
+import (
+ "testing"
+ "fmt"
+ "time"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/crypto/randentropy"
+ "bytes"
+ "encoding/hex"
+)
+
+const TestCount = 1000
+
+func generateKeyPair() (pubkey, privkey []byte) {
+ key, err := ecdsa.GenerateKey(S256(), rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ pubkey = elliptic.Marshal(S256(), key.X, key.Y)
+ return pubkey, math.PaddedBigBytes(key.D, 32)
+}
+
+func randSig() []byte {
+ sig := randentropy.GetEntropyCSPRNG(65)
+ sig[32] &= 0x70
+ sig[64] %= 4
+ return sig
+}
+
+// tests for malleability
+// highest bit of signature ECDSA s value must be 0, in the 33rd byte
+func compactSigCheck(t *testing.T, sig []byte) {
+ var b int = int(sig[32])
+ if b < 0 {
+ t.Errorf("highest bit is negative: %d", b)
+ }
+ if ((b >> 7) == 1) != ((b & 0x80) == 0x80) {
+ t.Errorf("highest bit: %d bit >> 7: %d", b, b>>7)
+ }
+ if (b & 0x80) == 0x80 {
+ t.Errorf("highest bit: %d bit & 0x80: %d", b, b&0x80)
+ }
+}
+
+func TestSignatureValidity(t *testing.T) {
+ pubkey, seckey := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Errorf("signature error: %s", err)
+ }
+ compactSigCheck(t, sig)
+ if len(pubkey) != 65 {
+ t.Errorf("pubkey length mismatch: want: 65 have: %d", len(pubkey))
+ }
+ if len(seckey) != 32 {
+ t.Errorf("seckey length mismatch: want: 32 have: %d", len(seckey))
+ }
+ if len(sig) != 65 {
+ t.Errorf("sig length mismatch: want: 65 have: %d", len(sig))
+ }
+ recid := int(sig[64])
+ if recid > 3 || recid < 0 { // valid recovery ids are 0..3 (checkSignature rejects >= 4)
+ t.Errorf("sig recid mismatch: want: within 0 to 3 have: %d", int(sig[64]))
+ }
+}
+
+func TestInvalidRecoveryID(t *testing.T) {
+ _, seckey := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ sig, _ := Sign(msg, seckey)
+ sig[64] = 99
+ _, err := RecoverPubkey(msg, sig)
+ if err != ErrInvalidRecoveryID {
+ t.Fatalf("got %q, want %q", err, ErrInvalidRecoveryID)
+ }
+}
+
+func TestSignAndRecover(t *testing.T) {
+ pubkey1, seckey := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Errorf("signature error: %s", err)
+ }
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Errorf("recover error: %s", err)
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+}
+
+func TestSignDeterministic(t *testing.T) {
+ _, seckey := generateKeyPair()
+ msg := make([]byte, 32)
+ copy(msg, "hi there")
+
+ sig1, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sig2, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(sig1, sig2) {
+ t.Fatal("signatures not equal")
+ }
+}
+
+func TestRandomMessagesWithSameKey(t *testing.T) {
+ pubkey, seckey := generateKeyPair()
+ keys := func() ([]byte, []byte) {
+ return pubkey, seckey
+ }
+ signAndRecoverWithRandomMessages(t, keys)
+}
+
+func TestRandomMessagesWithRandomKeys(t *testing.T) {
+ keys := func() ([]byte, []byte) {
+ pubkey, seckey := generateKeyPair()
+ return pubkey, seckey
+ }
+ signAndRecoverWithRandomMessages(t, keys)
+}
+
+func signAndRecoverWithRandomMessages(t *testing.T, keys func() ([]byte, []byte)) {
+ for i := 0; i < TestCount; i++ {
+ pubkey1, seckey := keys()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatalf("signature error: %s", err)
+ }
+ if sig == nil {
+ t.Fatal("signature is nil")
+ }
+ compactSigCheck(t, sig)
+
+ // TODO: why do we flip around the recovery id?
+ sig[len(sig)-1] %= 4
+
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Fatalf("recover error: %s", err)
+ }
+ if pubkey2 == nil {
+ t.Error("pubkey is nil")
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+ }
+}
+
+func TestRecoveryOfRandomSignature(t *testing.T) {
+ pubkey1, _ := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+
+ for i := 0; i < TestCount; i++ {
+ // recovery can sometimes work, but if so should always give wrong pubkey
+ pubkey2, _ := RecoverPubkey(msg, randSig())
+ if bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("iteration: %d: pubkey mismatch: do NOT want %x: ", i, pubkey2)
+ }
+ }
+}
+
+func TestRandomMessagesAgainstValidSig(t *testing.T) {
+ pubkey1, seckey := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ sig, _ := Sign(msg, seckey)
+
+ for i := 0; i < TestCount; i++ {
+ msg = randentropy.GetEntropyCSPRNG(32)
+ pubkey2, _ := RecoverPubkey(msg, sig)
+ // recovery can sometimes work, but if so should always give wrong pubkey
+ if bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("iteration: %d: pubkey mismatch: do NOT want %x: ", i, pubkey2)
+ }
+ }
+}
+
+// Useful when the underlying libsecp256k1 API changes to quickly
+// check only recover function without use of signature function
+func TestRecoverSanity(t *testing.T) {
+ msg, _ := hex.DecodeString("ce0677bb30baa8cf067c88db9811f4333d131bf8bcf12fe7065d211dce971008")
+ sig, _ := hex.DecodeString("90f27b8b488db00b00606796d2987f6a5f59ae62ea05effe84fef5b8b0e549984a691139ad57a3f0b906637673aa2f63d1f55cb1a69199d4009eea23ceaddc9301")
+ pubkey1, _ := hex.DecodeString("04e32df42865e97135acfb65f3bae71bdc86f4d49150ad6a440b6f15878109880a0a2b2667f7e725ceea70c673093bf67663e0312623c8e091b13cf2c0f11ef652")
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Fatalf("recover error: %s", err)
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+}
+
+func TestRangeproof(t *testing.T) {
+ RunRangeproof()
+ startTime := time.Now()
+ dt := SetupRangeproof(32)
+ setupTime := time.Now()
+ fmt.Println("Setup time:")
+ fmt.Println(setupTime.Sub(startTime))
+ CommitRangeproof(dt)
+ commitTime := time.Now()
+ fmt.Println("Commit time:")
+ fmt.Println(commitTime.Sub(setupTime))
+ ProveRangeproof(dt)
+ proveTime := time.Now()
+ fmt.Println("Prove time:")
+ fmt.Println(proveTime.Sub(commitTime))
+ result := VerifyRangeproof(dt)
+ verifyTime := time.Now()
+ fmt.Println("Verify time:")
+ fmt.Println(verifyTime.Sub(proveTime))
+ fmt.Println("result:")
+ fmt.Println(result)
+}
+
+func BenchmarkSign(b *testing.B) {
+ _, seckey := generateKeyPair()
+ msg := randentropy.GetEntropyCSPRNG(32)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ Sign(msg, seckey)
+ }
+}
+
+func BenchmarkRecover(b *testing.B) {
+ msg := randentropy.GetEntropyCSPRNG(32)
+ _, seckey := generateKeyPair()
+ sig, _ := Sign(msg, seckey)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ RecoverPubkey(msg, sig)
+ }
+}
diff --git a/src/ConfidentialTx/main.go b/src/ConfidentialTx/main.go
new file mode 100644
index 0000000..641db6a
--- /dev/null
+++ b/src/ConfidentialTx/main.go
@@ -0,0 +1,67 @@
+package main
+
+import (
+ "fmt"
+ "math/big"
+
+ "./zkproofs"
+)
+
+func main() {
+ var (
+ zkrpX zkproofs.Bp
+ zkrpY zkproofs.Bp
+ zkrpZ zkproofs.Bp
+ )
+ // common setup
+ zkrpX.Setup(0, 4294967296)
+ zkrpY.Setup(0, 4294967296)
+ zkrpZ.Setup(0, 4294967296)
+
+ // value
+ x := new(big.Int).SetInt64(30)
+ y := new(big.Int).SetInt64(20)
+ z := new(big.Int).SetInt64(10)
+ // get blind factor, pedersen commit and zkproof
+ // 这里会更新 zkrpX,所以 zkrpX 和 proofX 都需要保存下来
+ blindFactorX, commitmentX, proofX, _ := zkrpX.Prove(x)
+
+ // zkrp, _ := zkproofs.LoadParamFromDisk("setup.dat")
+ var ok bool
+ // proofX中包含commitmentX,如果修改了proofX.V 验证也不会通过
+ ok, err := zkrpX.Verify(proofX)
+ if !ok {
+ fmt.Println("proofX failed!!!")
+ fmt.Println(ok)
+ fmt.Println(err)
+ }
+
+ blindFactorY, commitmentY, proofY, _ := zkrpY.Prove(y)
+
+ // zkrp, _ = zkproofs.LoadParamFromDisk("setup.dat")
+ ok, _ = zkrpY.Verify(proofY)
+ if !ok {
+ fmt.Println("proofY failed!!!")
+ }
+
+ blindFactorZ, commitmentZ, proofZ, _ := zkrpZ.Prove(z)
+ // zkrp, _ = zkproofs.LoadParamFromDisk("setup.dat")
+ ok, err = zkrpZ.Verify(proofZ)
+ if !ok {
+ fmt.Println("proofZ failed!!!")
+ }
+ blindOut := new(big.Int).Add(blindFactorY, blindFactorZ)
+ blindDiff := new(big.Int).Sub(blindFactorX, blindOut)
+
+ commitmentOut := commitmentY.Add(commitmentY, commitmentZ)
+ commitmentDiff := commitmentX.Add(commitmentX, commitmentOut.Neg(commitmentOut))
+
+ fmt.Printf("blind diff: %d\n", blindDiff)
+ fmt.Printf("pedersen commitment diff: ( %d , %d )\n", commitmentDiff.X, commitmentDiff.Y)
+
+ check := zkproofs.Mult(zkrpX.H, blindDiff)
+ fmt.Printf("check diff: ( %d , %d )\n", check.X, check.Y)
+ fmt.Println(check.X.Cmp(commitmentDiff.X) == 0)
+ fmt.Println(check.Y.Cmp(commitmentDiff.Y) == 0)
+
+}
diff --git a/src/ConfidentialTx/zkproofs/bb.go b/src/ConfidentialTx/zkproofs/bb.go
new file mode 100644
index 0000000..70ef505
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bb.go
@@ -0,0 +1,104 @@
+// Copyright 2017 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+/*
+This file contains the implementation of the BB signature scheme proposed in the paper:
+Short signatures without random oracle
+Boneh and Boyen
+Eurocrypt 2004
+*/
+
+import (
+ "crypto/rand"
+ "errors"
+ "math/big"
+
+ "../crypto/bn256"
+)
+
+type keypair struct {
+ pubk *bn256.G1
+ privk *big.Int
+}
+
+/*
+keygen is responsible for the key generation.
+*/
+func keygen() (keypair, error) {
+ var (
+ kp keypair
+ e error
+ res bool
+ )
+ kp.privk, e = rand.Int(rand.Reader, bn256.Order)
+ if e != nil {
+ return kp, e
+ }
+ kp.pubk, res = new(bn256.G1).Unmarshal(new(bn256.G1).ScalarBaseMult(kp.privk).Marshal())
+ if !res { // Unmarshal's ok==true means success (cf. sign/verify below); error only on failure
+ return kp, errors.New("Could not compute scalar multiplication.")
+ }
+ return kp, e
+}
+
+/*
+sign receives as input a message and a private key and outputs a digital signature.
+*/
+func sign(m *big.Int, privk *big.Int) (*bn256.G2, error) {
+ var (
+ res bool
+ signature *bn256.G2
+ )
+ inv := ModInverse(Mod(Add(m, privk), bn256.Order), bn256.Order)
+ signature, res = new(bn256.G2).Unmarshal(new(bn256.G2).ScalarBaseMult(inv).Marshal())
+ if res != false {
+ return signature, nil
+ } else {
+ return nil, errors.New("Error while computing signature.")
+ }
+}
+
+/*
+verify receives as input the digital signature, the message and the public key. It outputs
+true if and only if the signature is valid.
+*/
+func verify(signature *bn256.G2, m *big.Int, pubk *bn256.G1) (bool, error) {
+ // e(y.g^m, sig) = e(g1,g2)
+ var (
+ gm *bn256.G1
+ e, res bool
+ )
+ // g^m
+ gm, e = new(bn256.G1).Unmarshal(new(bn256.G1).ScalarBaseMult(m).Marshal())
+ // y.g^m
+ gm = gm.Add(gm, pubk)
+ // e(y.g^m, sig)
+ p1 := bn256.Pair(gm, signature)
+ // e(g1,g2)
+ g1 := new(bn256.G1).ScalarBaseMult(new(big.Int).SetInt64(1))
+ g2 := new(bn256.G2).ScalarBaseMult(new(big.Int).SetInt64(1))
+ p2 := bn256.Pair(g1, g2)
+ // p1 == p2?
+ p2 = p2.Neg(p2)
+ p1 = p1.Add(p1, p2)
+ res = p1.IsOne()
+ if e != false {
+ return res, nil
+ }
+ return false, errors.New("Error while computing signature.")
+}
diff --git a/src/ConfidentialTx/zkproofs/bb_test.go b/src/ConfidentialTx/zkproofs/bb_test.go
new file mode 100644
index 0000000..6c524be
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bb_test.go
@@ -0,0 +1,33 @@
+// Copyright 2017 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "testing"
+ "math/big"
+)
+
+func TestKeyGen(t *testing.T) {
+ kp, _ := keygen()
+ signature, _ := sign(big.NewInt(42), kp.privk)
+ res, _ := verify(signature, big.NewInt(42), kp.pubk)
+ if res != true {
+ t.Errorf("Assert failure: expected true, actual: %t", res)
+ t.Fail()
+ }
+}
+
diff --git a/src/ConfidentialTx/zkproofs/bn.go b/src/ConfidentialTx/zkproofs/bn.go
new file mode 100644
index 0000000..14ff674
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bn.go
@@ -0,0 +1,75 @@
+// Copyright 2017 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "crypto/sha256"
+ "math/big"
+
+ "../byteconversion"
+)
+
+var k1 = new(big.Int).SetBit(big.NewInt(0), 160, 1) // 2^160, security parameter that should match prover
+
+func CalculateHash(b1 *big.Int, b2 *big.Int) (*big.Int, error) {
+
+ digest := sha256.New()
+ digest.Write(byteconversion.ToByteArray(b1))
+ if b2 != nil {
+ digest.Write(byteconversion.ToByteArray(b2))
+ }
+ output := digest.Sum(nil)
+ tmp := output[0:len(output)]
+ return byteconversion.FromByteArray(tmp)
+}
+
+/**
+ * Returns base**exponent mod |modulo| also works for negative exponent (contrary to big.Int.Exp)
+ */
+func ModPow(base *big.Int, exponent *big.Int, modulo *big.Int) *big.Int {
+
+ var returnValue *big.Int
+
+ if exponent.Cmp(big.NewInt(0)) >= 0 {
+ returnValue = new(big.Int).Exp(base, exponent, modulo)
+ } else {
+ // Exp doesn't support negative exponent so instead:
+ // use positive exponent than take inverse (modulo)..
+ returnValue = ModInverse(new(big.Int).Exp(base, new(big.Int).Abs(exponent), modulo), modulo)
+ }
+ return returnValue
+}
+
+func Add(x *big.Int, y *big.Int) *big.Int {
+ return new(big.Int).Add(x, y)
+}
+
+func Sub(x *big.Int, y *big.Int) *big.Int {
+ return new(big.Int).Sub(x, y)
+}
+
+func Mod(base *big.Int, modulo *big.Int) *big.Int {
+ return new(big.Int).Mod(base, modulo)
+}
+
+func Multiply(factor1 *big.Int, factor2 *big.Int) *big.Int {
+ return new(big.Int).Mul(factor1, factor2)
+}
+
+func ModInverse(base *big.Int, modulo *big.Int) *big.Int {
+ return new(big.Int).ModInverse(base, modulo)
+}
diff --git a/src/ConfidentialTx/zkproofs/bn_test.go b/src/ConfidentialTx/zkproofs/bn_test.go
new file mode 100644
index 0000000..880d5a7
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bn_test.go
@@ -0,0 +1,129 @@
+// Copyright 2017 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "testing"
+ "math/big"
+ )
+
+
+func TestCalculateHash(t * testing.T) {
+
+ a := GetBigInt("20905485153255974750600830283139712767405035066172127447413526262122898097752829902691919420016794244099612526431387099905077116995490485444167190551980224865082320241670546533063409921082864323224863076823319894932240914571396941354556281385023649535909639921239646795929610627460276589386330363348840105387073757406261480377763345436612442076323102518362946991582624513737241437269968051355243751819094759669539075841991633425362795570590507959822047022497500292880734028347273355847985904992235033659931679254742902977502890883426551960403450937665750386501228142099266824028488862959626463948822181376617128628357")
+ b := GetBigInt("5711912074763938920844020768820827016918638588776093786691324830937965710562669998102969607754216881533101753509522661181935679768137553251696427895001308210043958162362474454915118307661021406997989560047755201343617470288619030784987198511772840498354380632474664457429003510207310347179884080000294301502325103527312780599913053243627156705417875172756769585807691558680079741149166677442267851492473670184071199725213912264373214980177804010561543807969309223405291240876888702197126709861726023144260487044339708816278182396486957437256069194438047922679665536060592545457448379589893428429445378466414731324407")
+
+ expectedResult := GetBigInt("-19913561841364303941087968013056854925409568225408501509608065500928998362191")
+ actualResult, _ := CalculateHash(a, b)
+ actualResult2, _ := CalculateHash(a, b)
+
+ if expectedResult.Cmp(actualResult) != 0 {
+ t.Errorf("Assert failure: hashed is: %s", actualResult)
+ }
+ if expectedResult.Cmp(actualResult2) != 0 {
+ t.Errorf("Assert failure: hashed 2 is: %s", actualResult2)
+ }
+}
+
+func TestModPow1(t *testing.T) {
+
+ base := big.NewInt(10)
+ exponent := big.NewInt(3)
+ modulo := big.NewInt(7)
+
+ result := ModPow(base, exponent, modulo)
+
+ if result.Cmp(big.NewInt(6)) != 0 {
+ t.Errorf("Assert failure: expected 6, actual: %s", result)
+ }
+}
+
+func TestModPow2(t *testing.T) {
+
+ base := big.NewInt(30)
+ exponent := big.NewInt(2)
+ modulo := big.NewInt(7)
+
+ var result = ModPow(base, exponent, modulo)
+
+ if result.Cmp(big.NewInt(4)) != 0 {
+ t.Errorf("Assert failure: expected 4, actual: %s", result)
+ }
+}
+
+func TestModPowNegativeExp1(t *testing.T) {
+
+ result := ModPow(big.NewInt(16), big.NewInt(-1), big.NewInt(7))
+
+ if result.Cmp(big.NewInt(4)) != 0 {
+ t.Errorf("Assert failure: expected 4, actual: %s", result)
+ }
+}
+
+func TestModPowNegativeExp2(t *testing.T) {
+
+ result := ModPow(big.NewInt(34), big.NewInt(-2), big.NewInt(9))
+
+ if result.Cmp(big.NewInt(7)) != 0 {
+ t.Errorf("Assert failure: expected 7, actual: %s", result)
+ }
+}
+
+func TestModInverse1(t *testing.T) {
+
+ base := big.NewInt(5)
+ modulo := big.NewInt(1)
+
+ var result = ModInverse(base, modulo)
+
+ if result.Cmp(big.NewInt(0)) != 0 {
+ t.Errorf("Assert failure: expected 0, actual: %s", result)
+ }
+}
+
+func TestModInverse2(t *testing.T) {
+
+ base := big.NewInt(3)
+ modulo := big.NewInt(7)
+
+ var result = ModInverse(base, modulo)
+
+ if result.Cmp(big.NewInt(5)) != 0 {
+ t.Errorf("Assert failure: expected 5, actual: %s", result)
+ }
+}
+
+func TestMultiply(t *testing.T) {
+
+ factor1 := big.NewInt(3)
+ factor2 := big.NewInt(7)
+
+ var result = Multiply(factor1, factor2)
+ if result.Cmp(big.NewInt(21)) != 0 {
+ t.Errorf("Assert failure: expected 21, actual: %s", result)
+ }
+}
+
+func TestMod(t *testing.T) {
+
+ result := Mod(big.NewInt(16), big.NewInt(7))
+
+ if result.Cmp(big.NewInt(2)) != 0 {
+ t.Errorf("Assert failure: expected 2, actual: %s", result)
+ }
+}
+
diff --git a/src/ConfidentialTx/zkproofs/bulletproofs.go b/src/ConfidentialTx/zkproofs/bulletproofs.go
new file mode 100644
index 0000000..37c13c6
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bulletproofs.go
@@ -0,0 +1,1358 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+This file contains the implementation of the Bulletproofs scheme proposed in the paper:
+Bulletproofs: Short Proofs for Confidential Transactions and More
+Benedikt Bunz, Jonathan Bootle, Dan Boneh, Andrew Poelstra, Pieter Wuille and Greg Maxwell
+IEEE Symposium on Security and Privacy 2018
+*/
+
+package zkproofs
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "math"
+ "math/big"
+
+ "../byteconversion"
+)
+
+var (
+ ORDER = CURVE.N
+ SEEDH = "BulletproofsDoesNotNeedTrustedSetupH"
+ SEEDU = "BulletproofsDoesNotNeedTrustedSetupU"
+ SAVE = true
+)
+
+/*
+Bulletproofs parameters.
+*/
+type Bp struct {
+ N int64 // n 位
+ G *p256 // 曲线上的点 G 和 H
+ H *p256
+ Gg []*p256
+ Hh []*p256
+ Zkip bip
+}
+
+/*
+Bulletproofs proof.
+*/
+type proofBP struct {
+ V *p256
+ A *p256
+ S *p256
+ T1 *p256
+ T2 *p256
+ Taux *big.Int
+ Mu *big.Int
+ Tprime *big.Int
+ Proofip proofBip
+ Commit *p256
+}
+
+type (
+ pstring struct {
+ X string
+ Y string
+ }
+)
+
+type (
+ ipstring struct {
+ N int64
+ A string
+ B string
+ U pstring
+ P pstring
+ Gg pstring
+ Hh pstring
+ Ls []pstring
+ Rs []pstring
+ }
+)
+
+func (p *proofBP) MarshalJSON() ([]byte, error) {
+ type Alias proofBP
+ var iLs []pstring
+ var iRs []pstring
+ var i int
+ logn := len(p.Proofip.Ls)
+ iLs = make([]pstring, logn)
+ iRs = make([]pstring, logn)
+ i = 0
+ for i < logn {
+ iLs[i] = pstring{X: p.Proofip.Ls[i].X.String(), Y: p.Proofip.Ls[i].Y.String()}
+ iRs[i] = pstring{X: p.Proofip.Rs[i].X.String(), Y: p.Proofip.Rs[i].Y.String()}
+ i = i + 1
+ }
+ return json.Marshal(&struct {
+ V pstring `json:"V"`
+ A pstring `json:"A"`
+ S pstring `json:"S"`
+ T1 pstring `json:"T1"`
+ T2 pstring `json:"T2"`
+ Taux string `json:"Taux"`
+ Mu string `json:"Mu"`
+ Tprime string `json:"Tprime"`
+ Commit pstring `json:"Commit"`
+ Proofip ipstring `json:"Proofip"`
+ *Alias
+ }{
+ V: pstring{X: p.V.X.String(), Y: p.V.Y.String()},
+ A: pstring{X: p.A.X.String(), Y: p.A.Y.String()},
+ S: pstring{X: p.S.X.String(), Y: p.S.Y.String()},
+ T1: pstring{X: p.T1.X.String(), Y: p.T1.Y.String()},
+ T2: pstring{X: p.T2.X.String(), Y: p.T2.Y.String()},
+ Mu: p.Mu.String(),
+ Taux: p.Taux.String(),
+ Tprime: p.Tprime.String(),
+ Commit: pstring{X: p.Commit.X.String(), Y: p.Commit.Y.String()},
+ Proofip: ipstring{
+ N: p.Proofip.N,
+ A: p.Proofip.A.String(),
+ B: p.Proofip.B.String(),
+ U: pstring{X: p.Proofip.U.X.String(), Y: p.Proofip.U.Y.String()},
+ P: pstring{X: p.Proofip.P.X.String(), Y: p.Proofip.P.Y.String()},
+ Gg: pstring{X: p.Proofip.Gg.X.String(), Y: p.Proofip.Gg.Y.String()},
+ Hh: pstring{X: p.Proofip.Hh.X.String(), Y: p.Proofip.Hh.Y.String()},
+ Ls: iLs,
+ Rs: iRs,
+ },
+ Alias: (*Alias)(p),
+ })
+}
+
+func (p *proofBP) UnmarshalJSON(data []byte) error {
+ type Alias proofBP
+ aux := &struct {
+ V pstring `json:"V"`
+ A pstring `json:"A"`
+ S pstring `json:"S"`
+ T1 pstring `json:"T1"`
+ T2 pstring `json:"T2"`
+ Taux string `json:"Taux"`
+ Mu string `json:"Mu"`
+ Tprime string `json:"Tprime"`
+ Commit pstring `json:"Commit"`
+ Proofip ipstring `json:"Proofip"`
+ *Alias
+ }{
+ Alias: (*Alias)(p),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ valVX, _ := new(big.Int).SetString(aux.V.X, 10)
+ valVY, _ := new(big.Int).SetString(aux.V.Y, 10)
+ valAX, _ := new(big.Int).SetString(aux.A.X, 10)
+ valAY, _ := new(big.Int).SetString(aux.A.Y, 10)
+ valSX, _ := new(big.Int).SetString(aux.S.X, 10)
+ valSY, _ := new(big.Int).SetString(aux.S.Y, 10)
+ valT1X, _ := new(big.Int).SetString(aux.T1.X, 10)
+ valT1Y, _ := new(big.Int).SetString(aux.T1.Y, 10)
+ valT2X, _ := new(big.Int).SetString(aux.T2.X, 10)
+ valT2Y, _ := new(big.Int).SetString(aux.T2.Y, 10)
+ valCommitX, _ := new(big.Int).SetString(aux.Commit.X, 10)
+ valCommitY, _ := new(big.Int).SetString(aux.Commit.Y, 10)
+ valN := aux.Proofip.N
+ valA, _ := new(big.Int).SetString(aux.Proofip.A, 10)
+ valB, _ := new(big.Int).SetString(aux.Proofip.B, 10)
+ valUx, _ := new(big.Int).SetString(aux.Proofip.U.X, 10)
+ valUy, _ := new(big.Int).SetString(aux.Proofip.U.Y, 10)
+ valPx, _ := new(big.Int).SetString(aux.Proofip.P.X, 10)
+ valPy, _ := new(big.Int).SetString(aux.Proofip.P.Y, 10)
+ valGgx, _ := new(big.Int).SetString(aux.Proofip.Gg.X, 10)
+ valGgy, _ := new(big.Int).SetString(aux.Proofip.Gg.Y, 10)
+ valHhx, _ := new(big.Int).SetString(aux.Proofip.Hh.X, 10)
+ valHhy, _ := new(big.Int).SetString(aux.Proofip.Hh.Y, 10)
+ p.V = &p256{
+ X: valVX,
+ Y: valVY,
+ }
+ p.A = &p256{
+ X: valAX,
+ Y: valAY,
+ }
+ p.S = &p256{
+ X: valSX,
+ Y: valSY,
+ }
+ p.T1 = &p256{
+ X: valT1X,
+ Y: valT1Y,
+ }
+ p.T2 = &p256{
+ X: valT2X,
+ Y: valT2Y,
+ }
+ p.Commit = &p256{
+ X: valCommitX,
+ Y: valCommitY,
+ }
+ valU := &p256{
+ X: valUx,
+ Y: valUy,
+ }
+ valP := &p256{
+ X: valPx,
+ Y: valPy,
+ }
+ valGg := &p256{
+ X: valGgx,
+ Y: valGgy,
+ }
+ valHh := &p256{
+ X: valHhx,
+ Y: valHhy,
+ }
+ p.Taux, _ = new(big.Int).SetString(aux.Taux, 10)
+ p.Mu, _ = new(big.Int).SetString(aux.Mu, 10)
+ p.Tprime, _ = new(big.Int).SetString(aux.Tprime, 10)
+ logn := len(aux.Proofip.Ls)
+ valLs := make([]*p256, logn)
+ valRs := make([]*p256, logn)
+ var (
+ i int
+ valLsx *big.Int
+ valLsy *big.Int
+ valRsx *big.Int
+ valRsy *big.Int
+ )
+ i = 0
+ for i < logn {
+ valLsx, _ = new(big.Int).SetString(aux.Proofip.Ls[i].X, 10)
+ valLsy, _ = new(big.Int).SetString(aux.Proofip.Ls[i].Y, 10)
+ valLs[i] = &p256{X: valLsx, Y: valLsy}
+ valRsx, _ = new(big.Int).SetString(aux.Proofip.Rs[i].X, 10)
+ valRsy, _ = new(big.Int).SetString(aux.Proofip.Rs[i].Y, 10)
+ valRs[i] = &p256{X: valRsx, Y: valRsy}
+ i = i + 1
+ }
+ p.Proofip = proofBip{
+ N: valN,
+ A: valA,
+ B: valB,
+ U: valU,
+ P: valP,
+ Gg: valGg,
+ Hh: valHh,
+ Ls: valLs,
+ Rs: valRs,
+ }
+ return nil
+}
+
+type (
+ ipgenstring struct {
+ N int64
+ Cc string
+ Uu pstring
+ H pstring
+ Gg []pstring
+ Hh []pstring
+ P pstring
+ }
+)
+
+func (s *Bp) MarshalJSON() ([]byte, error) {
+ type Alias Bp
+ var iHh []pstring
+ var iGg []pstring
+
+ var i int
+ n := len(s.Gg)
+ iGg = make([]pstring, n)
+ iHh = make([]pstring, n)
+ i = 0
+ for i < n {
+ iGg[i] = pstring{X: s.Zkip.Gg[i].X.String(), Y: s.Zkip.Gg[i].Y.String()}
+ iHh[i] = pstring{X: s.Zkip.Hh[i].X.String(), Y: s.Zkip.Hh[i].Y.String()}
+ i = i + 1
+ }
+ return json.Marshal(&struct {
+ Zkip ipgenstring `json:"Zkip"`
+ *Alias
+ }{
+ Zkip: ipgenstring{
+ N: s.N,
+ Cc: s.Zkip.Cc.String(),
+ Uu: pstring{X: s.Zkip.Uu.X.String(), Y: s.Zkip.Uu.Y.String()},
+ H: pstring{X: s.Zkip.H.X.String(), Y: s.Zkip.H.Y.String()},
+ Gg: iGg,
+ Hh: iHh,
+ P: pstring{X: s.Zkip.P.X.String(), Y: s.Zkip.P.Y.String()},
+ },
+ Alias: (*Alias)(s),
+ })
+}
+
+func (s *Bp) UnmarshalJSON(data []byte) error {
+ type Alias Bp
+ aux := &struct {
+ Zkip ipgenstring `json:"Zkip"`
+ *Alias
+ }{
+ Alias: (*Alias)(s),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ n := aux.N
+ valGg := make([]*p256, n)
+ valHh := make([]*p256, n)
+ var (
+ i int64
+ valGgx *big.Int
+ valGgy *big.Int
+ valHhx *big.Int
+ valHhy *big.Int
+ )
+ i = 0
+ for i < n {
+ valGgx, _ = new(big.Int).SetString(aux.Zkip.Gg[i].X, 10)
+ valGgy, _ = new(big.Int).SetString(aux.Zkip.Gg[i].Y, 10)
+ valGg[i] = &p256{X: valGgx, Y: valGgy}
+ valHhx, _ = new(big.Int).SetString(aux.Zkip.Hh[i].X, 10)
+ valHhy, _ = new(big.Int).SetString(aux.Zkip.Hh[i].Y, 10)
+ valHh[i] = &p256{X: valHhx, Y: valHhy}
+ i = i + 1
+ }
+ valN := aux.N
+ valCc, _ := new(big.Int).SetString(aux.Zkip.Cc, 10)
+ valUux, _ := new(big.Int).SetString(aux.Zkip.Uu.X, 10)
+ valUuy, _ := new(big.Int).SetString(aux.Zkip.Uu.Y, 10)
+ valHx, _ := new(big.Int).SetString(aux.Zkip.H.X, 10)
+ valHy, _ := new(big.Int).SetString(aux.Zkip.H.Y, 10)
+ valPx, _ := new(big.Int).SetString(aux.Zkip.P.X, 10)
+ valPy, _ := new(big.Int).SetString(aux.Zkip.P.Y, 10)
+ valUu := &p256{
+ X: valUux,
+ Y: valUuy,
+ }
+ valH := &p256{
+ X: valHx,
+ Y: valHy,
+ }
+ valP := &p256{
+ X: valPx,
+ Y: valPy,
+ }
+ s.Zkip = bip{
+ N: valN,
+ Cc: valCc,
+ Uu: valUu,
+ H: valH,
+ Gg: valGg,
+ Hh: valHh,
+ P: valP,
+ }
+ return nil
+}
+
+/*
+VectorCopy returns a vector composed by copies of a.
+*/
+func VectorCopy(a *big.Int, n int64) ([]*big.Int, error) {
+ var (
+ i int64
+ result []*big.Int
+ )
+ result = make([]*big.Int, n)
+ i = 0
+ for i < n {
+ result[i] = a
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorCopy returns a vector composed by copies of a.
+*/
+func VectorG1Copy(a *p256, n int64) ([]*p256, error) {
+ var (
+ i int64
+ result []*p256
+ )
+ result = make([]*p256, n)
+ i = 0
+ for i < n {
+ result[i] = a
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorConvertToBig converts an array of int64 to an array of big.Int.
+*/
+func VectorConvertToBig(a []int64, n int64) ([]*big.Int, error) {
+ var (
+ i int64
+ result []*big.Int
+ )
+ result = make([]*big.Int, n)
+ i = 0
+ for i < n {
+ result[i] = new(big.Int).SetInt64(a[i])
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorAdd computes vector addition componentwisely.
+*/
+func VectorAdd(a, b []*big.Int) ([]*big.Int, error) {
+ var (
+ result []*big.Int
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ i = 0
+ result = make([]*big.Int, n)
+ for i < n {
+ result[i] = Add(a[i], b[i])
+ result[i] = Mod(result[i], ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorSub computes vector addition componentwisely.
+*/
+func VectorSub(a, b []*big.Int) ([]*big.Int, error) {
+ var (
+ result []*big.Int
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ i = 0
+ result = make([]*big.Int, n)
+ for i < n {
+ result[i] = Sub(a[i], b[i])
+ result[i] = Mod(result[i], ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorScalarMul computes vector scalar multiplication componentwisely.
+*/
+func VectorScalarMul(a []*big.Int, b *big.Int) ([]*big.Int, error) {
+ var (
+ result []*big.Int
+ i, n int64
+ )
+ n = int64(len(a))
+ i = 0
+ result = make([]*big.Int, n)
+ for i < n {
+ result[i] = Multiply(a[i], b)
+ result[i] = Mod(result[i], ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorMul computes vector multiplication componentwisely.
+*/
+func VectorMul(a, b []*big.Int) ([]*big.Int, error) {
+ var (
+ result []*big.Int
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ i = 0
+ result = make([]*big.Int, n)
+ for i < n {
+ result[i] = Multiply(a[i], b[i])
+ result[i] = Mod(result[i], ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorECMul computes vector EC addition componentwisely.
+*/
+func VectorECAdd(a, b []*p256) ([]*p256, error) {
+ var (
+ result []*p256
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ result = make([]*p256, n)
+ i = 0
+ for i < n {
+ result[i] = new(p256).Multiply(a[i], b[i])
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+ScalarProduct return the inner product between a and b.
+*/
+func ScalarProduct(a, b []*big.Int) (*big.Int, error) {
+ var (
+ result *big.Int
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ i = 0
+ result = GetBigInt("0")
+ for i < n {
+ ab := Multiply(a[i], b[i])
+ result.Add(result, ab)
+ result = Mod(result, ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorExp computes Prod_i^n{a[i]^b[i]}.
+*/
+func VectorExp(a []*p256, b []*big.Int) (*p256, error) {
+ var (
+ result *p256
+ i, n, m int64
+ )
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return nil, errors.New("Size of first argument is different from size of second argument.")
+ }
+ i = 0
+ result = new(p256).SetInfinity()
+ for i < n {
+ result.Multiply(result, new(p256).ScalarMult(a[i], b[i]))
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+VectorScalarExp computes a[i]^b for each i.
+*/
+func VectorScalarExp(a []*p256, b *big.Int) ([]*p256, error) {
+ var (
+ result []*p256
+ i, n int64
+ )
+ n = int64(len(a))
+ result = make([]*p256, n)
+ i = 0
+ for i < n {
+ result[i] = new(p256).ScalarMult(a[i], b)
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+PowerOf returns a vector composed by powers of x.
+*/
+func PowerOf(x *big.Int, n int64) ([]*big.Int, error) {
+ var (
+ i int64
+ result []*big.Int
+ )
+ result = make([]*big.Int, n)
+ current := GetBigInt("1")
+ i = 0
+ for i < n {
+ result[i] = current
+ current = Multiply(current, x)
+ current = Mod(current, ORDER)
+ i = i + 1
+ }
+ return result, nil
+}
+
/*
ComputeAR returns aR = aL - 1^n, i.e. it maps bit 0 -> -1 and bit 1 -> 0.
An error is returned if the input contains anything other than 0 or 1.
*/
func ComputeAR(x []int64) ([]int64, error) {
	result := make([]int64, len(x))
	for idx, bit := range x {
		switch bit {
		case 0:
			result[idx] = -1
		case 1:
			result[idx] = 0
		default:
			return nil, errors.New("input contains non-binary element")
		}
	}
	return result, nil
}
+
+/*
+Hash is responsible for the computing a Zp element given elements from GT and G1.
+*/
+func HashBP(A, S *p256) (*big.Int, *big.Int, error) {
+
+ digest1 := sha256.New()
+ var buffer bytes.Buffer
+ buffer.WriteString(A.X.String())
+ buffer.WriteString(A.Y.String())
+ buffer.WriteString(S.X.String())
+ buffer.WriteString(S.Y.String())
+ digest1.Write([]byte(buffer.String()))
+ output1 := digest1.Sum(nil)
+ tmp1 := output1[0:len(output1)]
+ result1 := new(big.Int).SetBytes(tmp1)
+
+ digest2 := sha256.New()
+ var buffer2 bytes.Buffer
+ buffer2.WriteString(A.X.String())
+ buffer2.WriteString(A.Y.String())
+ buffer2.WriteString(S.X.String())
+ buffer2.WriteString(S.Y.String())
+ buffer2.WriteString(result1.String())
+ digest2.Write([]byte(buffer2.String()))
+ output2 := digest2.Sum(nil)
+ tmp2 := output2[0:len(output2)]
+ result2 := new(big.Int).SetBytes(tmp2)
+
+ return result1, result2, nil
+}
+
+/*
+Commitvector computes a commitment to the bit of the secret.
+*/
+func CommitVector(aL, aR []int64, alpha *big.Int, G, H *p256, g, h []*p256, n int64) (*p256, error) {
+ var (
+ i int64
+ R *p256
+ )
+ // Compute h^alpha.vg^aL.vh^aR
+ R = new(p256).ScalarMult(H, alpha)
+ i = 0
+ for i < n {
+ gaL := new(p256).ScalarMult(g[i], new(big.Int).SetInt64(aL[i]))
+ haR := new(p256).ScalarMult(h[i], new(big.Int).SetInt64(aR[i]))
+ R.Multiply(R, gaL)
+ R.Multiply(R, haR)
+ i = i + 1
+ }
+ return R, nil
+}
+
+/*
+
+ */
+func CommitVectorBig(aL, aR []*big.Int, alpha *big.Int, G, H *p256, g, h []*p256, n int64) (*p256, error) {
+ var (
+ i int64
+ R *p256
+ )
+ // Compute h^alpha.vg^aL.vh^aR
+ R = new(p256).ScalarMult(H, alpha)
+ i = 0
+ for i < n {
+ R.Multiply(R, new(p256).ScalarMult(g[i], aL[i]))
+ R.Multiply(R, new(p256).ScalarMult(h[i], aR[i]))
+ i = i + 1
+ }
+ return R, nil
+}
+
+/*
+SaveToDisk is responsible for saving the generator to disk, such it is possible
+to then later.
+*/
+func (zkrp *Bp) SaveToDisk(s string, p *proofBP) error {
+ data, err := json.Marshal(zkrp)
+ errw := ioutil.WriteFile(s, data, 0644)
+ if p != nil {
+ datap, errp := json.Marshal(p)
+ errpw := ioutil.WriteFile("proof.dat", datap, 0644)
+ if errp != nil || errpw != nil {
+ return errors.New("proof not saved to disk.")
+ }
+ }
+ if err != nil || errw != nil {
+ return errors.New("parameters not saved to disk.")
+ }
+ return nil
+}
+
+/*
+LoadGenFromDisk reads the generator from a file.
+*/
+func LoadParamFromDisk(s string) (*Bp, error) {
+ var result Bp
+ c, err := ioutil.ReadFile(s)
+ if err != nil {
+ return nil, err
+ }
+ if len(c) > 0 {
+ json.Unmarshal(c, &result)
+ return &result, nil
+ }
+ return nil, errors.New("Could not load generators.")
+}
+
+/*
+LoadProofFromDisk reads the generator from a file.
+*/
+func LoadProofFromDisk(s string) (*proofBP, error) {
+ var result proofBP
+ c, err := ioutil.ReadFile(s)
+ if err != nil {
+ return nil, err
+ }
+ if len(c) > 0 {
+ json.Unmarshal(c, &result)
+ return &result, nil
+ }
+ return nil, errors.New("Could not load proof.")
+}
+
+/*
+delta(y,z) = (z-z^2) . < 1^n, y^n > - z^3 . < 1^n, 2^n >
+*/
+func (zkrp *Bp) Delta(y, z *big.Int) (*big.Int, error) {
+ var (
+ result *big.Int
+ )
+ // delta(y,z) = (z-z^2) . < 1^n, y^n > - z^3 . < 1^n, 2^n >
+ z2 := Multiply(z, z)
+ z2 = Mod(z2, ORDER)
+ z3 := Multiply(z2, z)
+ z3 = Mod(z3, ORDER)
+
+ // < 1^n, y^n >
+ v1, _ := VectorCopy(new(big.Int).SetInt64(1), zkrp.N)
+ vy, _ := PowerOf(y, zkrp.N)
+ sp1y, _ := ScalarProduct(v1, vy)
+
+ // < 1^n, 2^n >
+ p2n, _ := PowerOf(new(big.Int).SetInt64(2), zkrp.N)
+ sp12, _ := ScalarProduct(v1, p2n)
+
+ result = Sub(z, z2)
+ result = Mod(result, ORDER)
+ result = Multiply(result, sp1y)
+ result = Mod(result, ORDER)
+ result = Sub(result, Multiply(z3, sp12))
+ result = Mod(result, ORDER)
+
+ return result, nil
+}
+
+/*
+SetupPre is responsible for computing the common parameters.
+*/
+func (zkrp *Bp) SetupPre(a, b int64) {
+ res, _ := LoadParamFromDisk("setup.dat")
+ zkrp = res
+ // Setup Inner Product
+ zkrp.Zkip.Setup(zkrp.H, zkrp.Gg, zkrp.Hh, new(big.Int).SetInt64(0))
+}
+
+/*
+Setup is responsible for computing the common parameters.
+*/
+func (zkrp *Bp) Setup(a, b int64) {
+ var (
+ i int64
+ )
+ // 计算 G 和 H
+ zkrp.G = new(p256).ScalarBaseMult(new(big.Int).SetInt64(1))
+ zkrp.H, _ = MapToGroup(SEEDH)
+ // 有 n 位
+ zkrp.N = int64(math.Log2(float64(b)))
+ zkrp.Gg = make([]*p256, zkrp.N)
+ zkrp.Hh = make([]*p256, zkrp.N)
+ i = 0
+ for i < zkrp.N {
+ zkrp.Gg[i], _ = MapToGroup(SEEDH + "g" + string(i))
+ zkrp.Hh[i], _ = MapToGroup(SEEDH + "h" + string(i))
+ i = i + 1
+ }
+
+ // Setup Inner Product
+ zkrp.Zkip.Setup(zkrp.H, zkrp.Gg, zkrp.Hh, new(big.Int).SetInt64(0))
+ // zkrp.SaveToDisk("setup.dat", nil)
+}
+
/*
Prove computes the ZK range proof for secret, following the three phases of the
Bulletproofs protocol (bit commitments, polynomial commitments, inner-product
argument). It returns the blinding factor gamma, the Pedersen commitment V to
the secret, and the assembled proof. Errors from helpers are ignored
throughout (original behavior).
*/
func (zkrp *Bp) Prove(secret *big.Int) (*big.Int, *p256, proofBP, error) {
	var (
		i     int64
		sL    []*big.Int
		sR    []*big.Int
		proof proofBP
	)
	//////////////////////////////////////////////////////////////////////////////
	// First phase
	//////////////////////////////////////////////////////////////////////////////

	// commitment to v and gamma
	gamma, _ := rand.Int(rand.Reader, ORDER)
	V, _ := CommitG1(secret, gamma, zkrp.H)

	// aL, aR and commitment: (A, alpha)
	// decompose the secret into its base-2 digits to obtain aL
	aL, _ := Decompose(secret, 2, zkrp.N)
	// aR = aL - 1
	aR, _ := ComputeAR(aL)
	// blinding factor alpha
	alpha, _ := rand.Int(rand.Reader, ORDER)
	// A is the Pedersen vector commitment to aL and aR
	A, _ := CommitVector(aL, aR, alpha, zkrp.G, zkrp.H, zkrp.Gg, zkrp.Hh, zkrp.N)

	// sL, sR and commitment: (S, rho)
	rho, _ := rand.Int(rand.Reader, ORDER)
	sL = make([]*big.Int, zkrp.N)
	sR = make([]*big.Int, zkrp.N)
	i = 0
	for i < zkrp.N {
		sL[i], _ = rand.Int(rand.Reader, ORDER)
		sR[i], _ = rand.Int(rand.Reader, ORDER)
		i = i + 1
	}
	// S is the Pedersen vector commitment to the blinding vectors sL, sR
	S, _ := CommitVectorBig(sL, sR, rho, zkrp.G, zkrp.H, zkrp.Gg, zkrp.Hh, zkrp.N)

	// Fiat-Shamir heuristic to compute challenges y, z
	y, z, _ := HashBP(A, S)

	//////////////////////////////////////////////////////////////////////////////
	// Second phase
	//////////////////////////////////////////////////////////////////////////////
	tau1, _ := rand.Int(rand.Reader, ORDER) // page 20 from eprint version
	tau2, _ := rand.Int(rand.Reader, ORDER)

	// compute t1: < aL - z.1^n, y^n . sR > + < sL, y^n . (aR + z . 1^n) >
	vz, _ := VectorCopy(z, zkrp.N)
	vy, _ := PowerOf(y, zkrp.N)

	// aL - z.1^n
	naL, _ := VectorConvertToBig(aL, zkrp.N)
	aLmvz, _ := VectorSub(naL, vz)

	// y^n .sR
	ynsR, _ := VectorMul(vy, sR)

	// scalar prod: < aL - z.1^n, y^n . sR >
	sp1, _ := ScalarProduct(aLmvz, ynsR)

	// scalar prod: < sL, y^n . (aR + z . 1^n) >
	naR, _ := VectorConvertToBig(aR, zkrp.N)
	aRzn, _ := VectorAdd(naR, vz)
	ynaRzn, _ := VectorMul(vy, aRzn)

	// Add z^2.2^n to the result
	// z^2 . 2^n
	p2n, _ := PowerOf(new(big.Int).SetInt64(2), zkrp.N)
	zsquared := Multiply(z, z)
	z22n, _ := VectorScalarMul(p2n, zsquared)
	ynaRzn, _ = VectorAdd(ynaRzn, z22n)
	sp2, _ := ScalarProduct(sL, ynaRzn)

	// sp1 + sp2
	t1 := Add(sp1, sp2)
	t1 = Mod(t1, ORDER)

	// compute t2: < sL, y^n . sR >
	t2, _ := ScalarProduct(sL, ynsR)
	t2 = Mod(t2, ORDER)

	// compute T1: commitment to t1 with blinding tau1
	T1, _ := CommitG1(t1, tau1, zkrp.H)

	// compute T2: commitment to t2 with blinding tau2
	T2, _ := CommitG1(t2, tau2, zkrp.H)

	// Fiat-Shamir heuristic to compute 'random' challenge x
	x, _, _ := HashBP(T1, T2)

	//////////////////////////////////////////////////////////////////////////////
	// Third phase                                                              //
	//////////////////////////////////////////////////////////////////////////////

	// compute bl: (aL - z.1^n) + sL.x
	sLx, _ := VectorScalarMul(sL, x)
	bl, _ := VectorAdd(aLmvz, sLx)

	// compute br
	// y^n . ( aR + z.1^n + sR.x )
	sRx, _ := VectorScalarMul(sR, x)
	aRzn, _ = VectorAdd(aRzn, sRx)
	ynaRzn, _ = VectorMul(vy, aRzn)
	// y^n . ( aR + z.1^n + sR.x ) + z^2 . 2^n
	br, _ := VectorAdd(ynaRzn, z22n)

	// Compute t` = < bl, br >
	tprime, _ := ScalarProduct(bl, br)

	// Compute taux = tau2 . x^2 + tau1 . x + z^2 . gamma
	taux := Multiply(tau2, Multiply(x, x))
	taux = Add(taux, Multiply(tau1, x))
	taux = Add(taux, Multiply(Multiply(z, z), gamma))
	taux = Mod(taux, ORDER)

	// Compute mu = alpha + rho.x
	mu := Multiply(rho, x)
	mu = Add(mu, alpha)
	mu = Mod(mu, ORDER)

	// Inner Product over (g, h', P.h^-mu, tprime)
	// Compute h' = Hh[i]^(y^-i): switch generators so the inner product
	// argument runs over the y-scaled basis.
	hprime := make([]*p256, zkrp.N)
	// Switch generators
	yinv := ModInverse(y, ORDER)
	expy := yinv
	hprime[0] = zkrp.Hh[0]
	i = 1
	for i < zkrp.N {
		hprime[i] = new(p256).ScalarMult(zkrp.Hh[i], expy)
		expy = Multiply(expy, yinv)
		i = i + 1
	}

	// Update Inner Product Proof Setup
	// NOTE: this mutates zkrp.Zkip state for the subsequent Prove call.
	zkrp.Zkip.Hh = hprime
	zkrp.Zkip.Cc = tprime

	commit, _ := CommitInnerProduct(zkrp.Gg, hprime, bl, br)
	proofip, _ := zkrp.Zkip.Prove(bl, br, commit)

	proof.V = V
	proof.A = A
	proof.S = S
	proof.T1 = T1
	proof.T2 = T2
	proof.Taux = taux
	proof.Mu = mu
	proof.Tprime = tprime
	proof.Proofip = proofip
	proof.Commit = commit

	// zkrp.SaveToDisk("setup.dat", &proof)
	return gamma, V, proof, nil
}
+
/*
Verify returns true if and only if the proof is valid. It recomputes the
Fiat-Shamir challenges from the proof's commitments and checks conditions
(65)-(67) of the Bulletproofs paper (eprint version, page 20) plus the
inner-product argument.
*/
func (zkrp *Bp) Verify(proof proofBP) (bool, error) {
	var (
		i      int64
		hprime []*p256
	)
	hprime = make([]*p256, zkrp.N)
	// Re-derive the challenges exactly as the prover did.
	y, z, _ := HashBP(proof.A, proof.S)
	x, _, _ := HashBP(proof.T1, proof.T2)

	// Switch generators: hprime[i] = Hh[i]^(y^-i)
	yinv := ModInverse(y, ORDER)
	expy := yinv
	hprime[0] = zkrp.Hh[0]
	i = 1
	for i < zkrp.N {
		hprime[i] = new(p256).ScalarMult(zkrp.Hh[i], expy)
		expy = Multiply(expy, yinv)
		i = i + 1
	}

	//////////////////////////////////////////////////////////////////////////////
	// Check that tprime = t(x) = t0 + t1x + t2x^2 ---------- Condition (65)    //
	//////////////////////////////////////////////////////////////////////////////

	// Compute left hand side: commitment to tprime with blinding taux
	lhs, _ := CommitG1(proof.Tprime, proof.Taux, zkrp.H)

	// Compute right hand side
	z2 := Multiply(z, z)
	z2 = Mod(z2, ORDER)
	x2 := Multiply(x, x)
	x2 = Mod(x2, ORDER)

	rhs := new(p256).ScalarMult(proof.V, z2)

	delta, _ := zkrp.Delta(y, z)

	gdelta := new(p256).ScalarBaseMult(delta)

	rhs.Multiply(rhs, gdelta)

	T1x := new(p256).ScalarMult(proof.T1, x)
	T2x2 := new(p256).ScalarMult(proof.T2, x2)

	rhs.Multiply(rhs, T1x)
	rhs.Multiply(rhs, T2x2)

	// Subtract lhs and rhs and compare with point at infinity
	lhs.Neg(lhs)
	rhs.Multiply(rhs, lhs)
	c65 := rhs.IsZero() // Condition (65), page 20, from eprint version

	// Compute P - lhs #################### Condition (66) ######################

	// S^x
	Sx := new(p256).ScalarMult(proof.S, x)
	// A.S^x
	ASx := new(p256).Add(proof.A, Sx)

	// g^-z
	mz := Sub(ORDER, z)
	vmz, _ := VectorCopy(mz, zkrp.N)
	gpmz, _ := VectorExp(zkrp.Gg, vmz)
	//fmt.Println("############## gpmz ###############")
	//fmt.Println(gpmz);

	// z.y^n
	vz, _ := VectorCopy(z, zkrp.N)
	vy, _ := PowerOf(y, zkrp.N)
	zyn, _ := VectorMul(vy, vz)

	p2n, _ := PowerOf(new(big.Int).SetInt64(2), zkrp.N)
	zsquared := Multiply(z, z)
	z22n, _ := VectorScalarMul(p2n, zsquared)

	// z.y^n + z^2.2^n
	zynz22n, _ := VectorAdd(zyn, z22n)

	lP := new(p256)
	lP.Add(ASx, gpmz)

	// h'^(z.y^n + z^2.2^n)
	hprimeexp, _ := VectorExp(hprime, zynz22n)

	lP.Add(lP, hprimeexp)

	// Compute P - rhs #################### Condition (67) ######################

	// h^mu
	rP := new(p256).ScalarMult(zkrp.H, proof.Mu)
	rP.Multiply(rP, proof.Commit)

	// Subtract lhs and rhs and compare with point at infinity
	lP = lP.Neg(lP)
	rP.Add(rP, lP)
	c67 := rP.IsZero()

	// Verify Inner Product Proof ################################################
	ok, _ := zkrp.Zkip.Verify(proof.Proofip)

	// All three conditions must hold.
	result := c65 && c67 && ok

	return result, nil
}
+
+//////////////////////////////////// Inner Product ////////////////////////////////////
+
/*
Base struct for the Inner Product Argument. Holds the public parameters and
the running state shared between Setup, Prove and Verify.
*/
type bip struct {
	N  int64    // vector length used when hashing the generators (HashIP)
	Cc *big.Int // claimed inner product value c
	Uu *p256    // extra generator u (derived from SEEDU in Setup)
	H  *p256    // commitment generator H
	Gg []*p256  // generator vector g
	Hh []*p256  // generator vector h
	P  *p256    // commitment P' used by the recursive argument
}
+
/*
Struct that contains the Inner Product Proof: the per-round L/R points plus
the final scalars and generators from the recursion base case.
*/
type proofBip struct {
	Ls []*p256  // left points, one per recursion round
	Rs []*p256  // right points, one per recursion round
	U  *p256    // u^x generator carried through the recursion
	P  *p256    // commitment the argument was run against
	Gg *p256    // final (fully folded) g generator
	Hh *p256    // final (fully folded) h generator
	A  *big.Int // final scalar a
	B  *big.Int // final scalar b
	N  int64    // vector length at the top of the recursion
}
+
+/*
+HashIP is responsible for the computing a Zp element given elements from GT and G1.
+*/
+func HashIP(g, h []*p256, P *p256, c *big.Int, n int64) (*big.Int, error) {
+ var (
+ i int64
+ )
+
+ digest := sha256.New()
+ digest.Write([]byte(P.String()))
+
+ i = 0
+ for i < n {
+ digest.Write([]byte(g[i].String()))
+ digest.Write([]byte(h[i].String()))
+ i = i + 1
+ }
+
+ digest.Write([]byte(c.String()))
+ output := digest.Sum(nil)
+ tmp := output[0:len(output)]
+ result, err := byteconversion.FromByteArray(tmp)
+
+ return result, err
+}
+
+/*
+CommitInnerProduct is responsible for calculating g^a.h^b.
+*/
+func CommitInnerProduct(g, h []*p256, a, b []*big.Int) (*p256, error) {
+ var (
+ result *p256
+ )
+
+ ga, _ := VectorExp(g, a)
+ hb, _ := VectorExp(h, b)
+ result = new(p256).Multiply(ga, hb)
+ return result, nil
+}
+
+/*
+Setup is responsible for computing the inner product basic parameters that are common to both
+Prove and Verify algorithms.
+*/
+func (zkip *bip) Setup(H *p256, g, h []*p256, c *big.Int) (bip, error) {
+ var (
+ params bip
+ )
+
+ zkip.Gg = make([]*p256, zkip.N)
+ zkip.Hh = make([]*p256, zkip.N)
+ zkip.Uu, _ = MapToGroup(SEEDU)
+ zkip.H = H
+ zkip.Gg = g
+ zkip.Hh = h
+ zkip.Cc = c
+ zkip.P = new(p256).SetInfinity()
+
+ return params, nil
+}
+
+/*
+Prove is responsible for the generation of the Inner Product Proof.
+*/
+func (zkip *bip) Prove(a, b []*big.Int, P *p256) (proofBip, error) {
+ var (
+ proof proofBip
+ n, m int64
+ Ls []*p256
+ Rs []*p256
+ )
+
+ n = int64(len(a))
+ m = int64(len(b))
+ if n != m {
+ return proof, errors.New("Size of first array argument must be equal to the second")
+ } else {
+ // Fiat-Shamir:
+ // x = Hash(g,h,P,c)
+ x, _ := HashIP(zkip.Gg, zkip.Hh, P, zkip.Cc, zkip.N)
+ // Pprime = P.u^(x.c)
+ ux := new(p256).ScalarMult(zkip.Uu, x)
+ uxc := new(p256).ScalarMult(ux, zkip.Cc)
+ PP := new(p256).Multiply(P, uxc)
+ // Execute Protocol 2 recursively
+ zkip.P = PP
+ proof, err := BIP(a, b, zkip.Gg, zkip.Hh, ux, zkip.P, n, Ls, Rs)
+ proof.P = PP
+ return proof, err
+ }
+
+ return proof, nil
+}
+
/*
BIP is the main recursive function that will be used to compute the inner
product argument (Protocol 2 of the Bulletproofs paper). Each round halves the
vector length, appending one (L, R) pair per round; the base case (n == 1)
records the final scalars and generators in the proof.
*/
func BIP(a, b []*big.Int, g, h []*p256, u, P *p256, n int64, Ls, Rs []*p256) (proofBip, error) {
	var (
		proof                            proofBip
		cL, cR, x, xinv, x2, x2inv       *big.Int
		L, R, Lh, Rh, Pprime             *p256
		gprime, hprime, gprime2, hprime2 []*p256
		aprime, bprime, aprime2, bprime2 []*big.Int
	)

	if n == 1 {
		// recursion end: emit final scalars/generators and the accumulated L/R
		proof.A = a[0]
		proof.B = b[0]
		proof.Gg = g[0]
		proof.Hh = h[0]
		proof.P = P
		proof.U = u
		proof.Ls = Ls
		proof.Rs = Rs

	} else {
		// recursion

		// halve the problem size
		nprime := n / 2

		// Compute cL = < a[:n'], b[n':] >
		cL, _ = ScalarProduct(a[:nprime], b[nprime:])
		// Compute cR = < a[n':], b[:n'] >
		cR, _ = ScalarProduct(a[nprime:], b[:nprime])
		// Compute L = g[n':]^(a[:n']).h[:n']^(b[n':]).u^cL
		L, _ = VectorExp(g[nprime:], a[:nprime])
		Lh, _ = VectorExp(h[:nprime], b[nprime:])
		L.Multiply(L, Lh)
		L.Multiply(L, new(p256).ScalarMult(u, cL))

		// Compute R = g[:n']^(a[n':]).h[n':]^(b[:n']).u^cR
		R, _ = VectorExp(g[:nprime], a[nprime:])
		Rh, _ = VectorExp(h[nprime:], b[:nprime])
		R.Multiply(R, Rh)
		R.Multiply(R, new(p256).ScalarMult(u, cR))

		// Fiat-Shamir: round challenge x derived from (L, R)
		x, _, _ = HashBP(L, R)
		xinv = ModInverse(x, ORDER)

		// Compute g' = g[:n']^(x^-1) * g[n':]^(x)
		gprime, _ = VectorScalarExp(g[:nprime], xinv)
		gprime2, _ = VectorScalarExp(g[nprime:], x)
		gprime, _ = VectorECAdd(gprime, gprime2)
		// Compute h' = h[:n']^(x) * h[n':]^(x^-1)
		hprime, _ = VectorScalarExp(h[:nprime], x)
		hprime2, _ = VectorScalarExp(h[nprime:], xinv)
		hprime, _ = VectorECAdd(hprime, hprime2)

		// Compute P' = L^(x^2).P.R^(x^-2)
		x2 = Mod(Multiply(x, x), ORDER)
		x2inv = ModInverse(x2, ORDER)
		Pprime = new(p256).ScalarMult(L, x2)
		Pprime.Multiply(Pprime, P)
		Pprime.Multiply(Pprime, new(p256).ScalarMult(R, x2inv))

		// Compute a' = a[:n'].x + a[n':].x^(-1)
		aprime, _ = VectorScalarMul(a[:nprime], x)
		aprime2, _ = VectorScalarMul(a[nprime:], xinv)
		aprime, _ = VectorAdd(aprime, aprime2)
		// Compute b' = b[:n'].x^(-1) + b[n':].x
		bprime, _ = VectorScalarMul(b[:nprime], xinv)
		bprime2, _ = VectorScalarMul(b[nprime:], x)
		bprime, _ = VectorAdd(bprime, bprime2)

		// record this round's points, then recurse on the halved instance
		Ls = append(Ls, L)
		Rs = append(Rs, R)
		// recursion BIP(g',h',u,P'; a', b')
		proof, _ = BIP(aprime, bprime, gprime, hprime, u, Pprime, nprime, Ls, Rs)
	}
	proof.N = n
	return proof, nil
}
+
+/*
+Verify is responsible for the verification of the Inner Product Proof.
+*/
+func (zkip *bip) Verify(proof proofBip) (bool, error) {
+
+ logn := len(proof.Ls)
+ var (
+ i int64
+ x, xinv, x2, x2inv *big.Int
+ ngprime, nhprime, ngprime2, nhprime2 []*p256
+ )
+
+ i = 0
+ gprime := zkip.Gg
+ hprime := zkip.Hh
+ Pprime := zkip.P
+ nprime := proof.N
+ for i < int64(logn) {
+ nprime = nprime / 2
+ x, _, _ = HashBP(proof.Ls[i], proof.Rs[i])
+ xinv = ModInverse(x, ORDER)
+ // Compute g' = g[:n']^(x^-1) * g[n':]^(x)
+ ngprime, _ = VectorScalarExp(gprime[:nprime], xinv)
+ ngprime2, _ = VectorScalarExp(gprime[nprime:], x)
+ gprime, _ = VectorECAdd(ngprime, ngprime2)
+ // Compute h' = h[:n']^(x) * h[n':]^(x^-1)
+ nhprime, _ = VectorScalarExp(hprime[:nprime], x)
+ nhprime2, _ = VectorScalarExp(hprime[nprime:], xinv)
+ hprime, _ = VectorECAdd(nhprime, nhprime2)
+ // Compute P' = L^(x^2).P.R^(x^-2)
+ x2 = Mod(Multiply(x, x), ORDER)
+ x2inv = ModInverse(x2, ORDER)
+ Pprime.Multiply(Pprime, new(p256).ScalarMult(proof.Ls[i], x2))
+ Pprime.Multiply(Pprime, new(p256).ScalarMult(proof.Rs[i], x2inv))
+ i = i + 1
+ }
+
+ // c == a*b
+ ab := Multiply(proof.A, proof.B)
+ ab = Mod(ab, ORDER)
+
+ rhs := new(p256).ScalarMult(gprime[0], proof.A)
+ hb := new(p256).ScalarMult(hprime[0], proof.B)
+ rhs.Multiply(rhs, hb)
+ rhs.Multiply(rhs, new(p256).ScalarMult(proof.U, ab))
+
+ nP := Pprime.Neg(Pprime)
+ nP.Multiply(nP, rhs)
+ c := nP.IsZero()
+
+ return c, nil
+}
diff --git a/src/ConfidentialTx/zkproofs/bulletproofs_test.go b/src/ConfidentialTx/zkproofs/bulletproofs_test.go
new file mode 100644
index 0000000..48ab495
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/bulletproofs_test.go
@@ -0,0 +1,355 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ing-bank/zkproofs/go-ethereum/crypto/bn256"
+)
+
+/*
+Test method VectorCopy, which simply copies the first input argument to size n vector.
+*/
+func TestVectorCopy(t *testing.T) {
+ var (
+ result []*big.Int
+ )
+ result, _ = VectorCopy(new(big.Int).SetInt64(1), 3)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(1)) == 0)
+ ok = ok && (result[1].Cmp(GetBigInt("1")) == 0)
+ ok = ok && (result[2].Cmp(GetBigInt("1")) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Test method VectorConvertToBig.
+*/
+func TestVectorConvertToBig(t *testing.T) {
+ var (
+ result []*big.Int
+ a []int64
+ )
+ a = make([]int64, 3)
+ a[0] = 3
+ a[1] = 4
+ a[2] = 5
+ result, _ = VectorConvertToBig(a, 3)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(3)) == 0)
+ ok = ok && (result[1].Cmp(GetBigInt("4")) == 0)
+ ok = ok && (result[2].Cmp(GetBigInt("5")) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Scalar Product returns the inner product between 2 vectors.
+*/
+func TestScalarProduct(t *testing.T) {
+ var (
+ a, b []*big.Int
+ )
+ a = make([]*big.Int, 3)
+ b = make([]*big.Int, 3)
+ a[0] = new(big.Int).SetInt64(7)
+ a[1] = new(big.Int).SetInt64(7)
+ a[2] = new(big.Int).SetInt64(7)
+ b[0] = new(big.Int).SetInt64(3)
+ b[1] = new(big.Int).SetInt64(3)
+ b[2] = new(big.Int).SetInt64(3)
+ result, _ := ScalarProduct(a, b)
+ ok := (result.Cmp(new(big.Int).SetInt64(63)) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Tests Vector addition.
+*/
+func TestVectorAdd(t *testing.T) {
+ var (
+ a, b []*big.Int
+ )
+ a = make([]*big.Int, 3)
+ b = make([]*big.Int, 3)
+ a[0] = new(big.Int).SetInt64(7)
+ a[1] = new(big.Int).SetInt64(8)
+ a[2] = new(big.Int).SetInt64(9)
+ b[0] = new(big.Int).SetInt64(3)
+ b[1] = new(big.Int).SetInt64(30)
+ b[2] = new(big.Int).SetInt64(40)
+ result, _ := VectorAdd(a, b)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(10)) == 0)
+ ok = ok && (result[1].Cmp(GetBigInt("38")) == 0)
+ ok = ok && (result[2].Cmp(GetBigInt("49")) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Tests Vector subtraction.
+*/
+func TestVectorSub(t *testing.T) {
+ var (
+ a, b []*big.Int
+ )
+ a = make([]*big.Int, 3)
+ b = make([]*big.Int, 3)
+ a[0] = new(big.Int).SetInt64(7)
+ a[1] = new(big.Int).SetInt64(8)
+ a[2] = new(big.Int).SetInt64(9)
+ b[0] = new(big.Int).SetInt64(3)
+ b[1] = new(big.Int).SetInt64(30)
+ b[2] = new(big.Int).SetInt64(40)
+ result, _ := VectorSub(a, b)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(4)) == 0)
+ ok = ok && (result[1].Cmp(GetBigInt("115792089237316195423570985008687907852837564279074904382605163141518161494315")) == 0)
+ ok = ok && (result[2].Cmp(GetBigInt("115792089237316195423570985008687907852837564279074904382605163141518161494306")) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Tests Vector componentwise multiplication.
+*/
+func TestVectorMul(t *testing.T) {
+ var (
+ a, b []*big.Int
+ )
+ a = make([]*big.Int, 3)
+ b = make([]*big.Int, 3)
+ a[0] = new(big.Int).SetInt64(7)
+ a[1] = new(big.Int).SetInt64(8)
+ a[2] = new(big.Int).SetInt64(9)
+ b[0] = new(big.Int).SetInt64(3)
+ b[1] = new(big.Int).SetInt64(30)
+ b[2] = new(big.Int).SetInt64(40)
+ result, _ := VectorMul(a, b)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(21)) == 0)
+ ok = ok && (result[1].Cmp(new(big.Int).SetInt64(240)) == 0)
+ ok = ok && (result[2].Cmp(new(big.Int).SetInt64(360)) == 0)
+
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Test method PowerOf, which must return a vector containing a growing sequence of
+powers of 2.
+*/
+func TestPowerOf(t *testing.T) {
+ result, _ := PowerOf(new(big.Int).SetInt64(3), 3)
+ ok := (result[0].Cmp(new(big.Int).SetInt64(1)) == 0)
+ ok = ok && (result[1].Cmp(new(big.Int).SetInt64(3)) == 0)
+ ok = ok && (result[2].Cmp(new(big.Int).SetInt64(9)) == 0)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
/*
Test Inner Product argument: proves and verifies <a, b> = 142 over the
generators produced by the range-proof Setup.
*/
func TestInnerProduct(t *testing.T) {
	var (
		zkrp Bp
		zkip bip
		a    []*big.Int
		b    []*big.Int
	)
	// TODO:
	// Review if it is the best way, since we maybe could use the
	// inner product independently of the range proof.
	zkrp.Setup(0, 16) // yields N = 4 generators
	a = make([]*big.Int, zkrp.N)
	a[0] = new(big.Int).SetInt64(2)
	a[1] = new(big.Int).SetInt64(-1)
	a[2] = new(big.Int).SetInt64(10)
	a[3] = new(big.Int).SetInt64(6)
	b = make([]*big.Int, zkrp.N)
	b[0] = new(big.Int).SetInt64(1)
	b[1] = new(big.Int).SetInt64(2)
	b[2] = new(big.Int).SetInt64(10)
	b[3] = new(big.Int).SetInt64(7)
	// c = <a, b> = 2 - 2 + 100 + 42 = 142
	c := new(big.Int).SetInt64(142)
	commit, _ := CommitInnerProduct(zkrp.Gg, zkrp.Hh, a, b)
	zkip.Setup(zkrp.H, zkrp.Gg, zkrp.Hh, c)
	proof, _ := zkip.Prove(a, b, commit)
	ok, _ := zkip.Verify(proof)
	if ok != true {
		t.Errorf("Assert failure: expected true, actual: %t", ok)
	}
}
+
+/*
+Test the FALSE case of ZK Range Proof scheme using Bulletproofs.
+*/
+func TestFalseBulletproofsZKRP(t *testing.T) {
+ var (
+ zkrp Bp
+ )
+ startTime := time.Now()
+ zkrp.Setup(0, 4294967296) // ITS BEING USED TO COMPUTE N
+ setupTime := time.Now()
+ fmt.Println("Setup time:")
+ fmt.Println(setupTime.Sub(startTime))
+
+ x := new(big.Int).SetInt64(4294967296)
+ proof, _ := zkrp.Prove(x)
+ proofTime := time.Now()
+ fmt.Println("Proof time:")
+ fmt.Println(proofTime.Sub(setupTime))
+
+ ok, _ := zkrp.Verify(proof)
+ verifyTime := time.Now()
+ fmt.Println("Verify time:")
+ fmt.Println(verifyTime.Sub(proofTime))
+
+ fmt.Println("Range Proofs invalid test result:")
+ fmt.Println(ok)
+ if ok != false {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+/*
+Test the TRUE case of ZK Range Proof scheme using Bulletproofs.
+*/
+func TestTrueBulletproofsZKRP(t *testing.T) {
+ var (
+ zkrp Bp
+ )
+ startTime := time.Now()
+ zkrp.Setup(0, 4294967296) // ITS BEING USED TO COMPUTE N
+ setupTime := time.Now()
+ fmt.Println("Setup time:")
+ fmt.Println(setupTime.Sub(startTime))
+
+ x := new(big.Int).SetInt64(65535)
+ proof, _ := zkrp.Prove(x)
+ proofTime := time.Now()
+ fmt.Println("Proof time:")
+ fmt.Println(proofTime.Sub(setupTime))
+
+ ok, _ := zkrp.Verify(proof)
+ verifyTime := time.Now()
+ fmt.Println("Verify time:")
+ fmt.Println(verifyTime.Sub(proofTime))
+
+ fmt.Println("Range Proofs result:")
+ fmt.Println(ok)
+ if ok != true {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+func BenchmarkBulletproofs(b *testing.B) {
+ var (
+ zkrp Bp
+ proof proofBP
+ ok bool
+ )
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ zkrp.Setup(0, 4294967296) // ITS BEING USED TO COMPUTE N
+ x := new(big.Int).SetInt64(4294967295)
+ proof, _ = zkrp.Prove(x)
+ ok, _ = zkrp.Verify(proof)
+ if ok != true {
+ b.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+ }
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ var (
+ a *big.Int
+ A *bn256.G1
+ )
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ a, _ = rand.Int(rand.Reader, bn256.Order)
+ A = new(bn256.G1).ScalarBaseMult(a)
+ }
+ fmt.Println("A:")
+ fmt.Println(A)
+}
+
+func TestHashBP(t *testing.T) {
+ agx, _ := new(big.Int).SetString("110720467414728166769654679803728202169916280248550137472490865118702779748947", 10)
+ agy, _ := new(big.Int).SetString("103949684536896233354287911519259186718323435572971865592336813380571928560949", 10)
+ sgx, _ := new(big.Int).SetString("78662919066140655151560869958157053125629409725243565127658074141532489435921", 10)
+ sgy, _ := new(big.Int).SetString("114946280626097680211499478702679495377587739951564115086530426937068100343655", 10)
+ pointa := &p256{X: agx, Y: agy}
+ points := &p256{X: sgx, Y: sgy}
+ result1, result2, _ := HashBP(pointa, points)
+ res1, _ := new(big.Int).SetString("103823382860325249552741530200099120077084118788867728791742258217664299339569", 10)
+ res2, _ := new(big.Int).SetString("8192372577089859289404358830067912230280991346287696886048261417244724213964", 10)
+ ok1 := (result1.Cmp(res1) != 0)
+ ok2 := (result2.Cmp(res2) != 0)
+ ok := ok1 && ok2
+ if ok {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+func TestHashBPGx(t *testing.T) {
+ gx, _ := new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
+ gy, _ := new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
+ point := &p256{X: gx, Y: gy}
+ result1, result2, _ := HashBP(point, point)
+ res1, _ := new(big.Int).SetString("11897424191990306464486192136408618361228444529783223689021929580052970909263", 10)
+ res2, _ := new(big.Int).SetString("22166487799255634251145870394406518059682307840904574298117500050508046799269", 10)
+ ok1 := (result1.Cmp(res1) != 0)
+ ok2 := (result2.Cmp(res2) != 0)
+ ok := ok1 && ok2
+ if ok {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+func TestInv(t *testing.T) {
+ y, _ := new(big.Int).SetString("103823382860325249552741530200099120077084118788867728791742258217664299339569", 10)
+ yinv := ModInverse(y, ORDER)
+ res, _ := new(big.Int).SetString("38397371868935917445400134055424677162505875368971619911110421656148020877351", 10)
+ ok := (yinv.Cmp(res) != 0)
+ if ok {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
+
+func TestHPrime(t *testing.T) {
+ var zkrp *Bp
+ var proof *proofBP
+ zkrp, _ = LoadParamFromDisk("setup.dat")
+ proof, _ = LoadProofFromDisk("proof.dat")
+ ok, _ := zkrp.Verify(*proof)
+ if !ok {
+ t.Errorf("Assert failure: expected true, actual: %t", ok)
+ }
+}
diff --git a/src/ConfidentialTx/zkproofs/ccs08.go b/src/ConfidentialTx/zkproofs/ccs08.go
new file mode 100644
index 0000000..687f717
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/ccs08.go
@@ -0,0 +1,426 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+This file contains the implementation of the ZKRP scheme proposed in the paper:
+Efficient Protocols for Set Membership and Range Proofs
+Jan Camenisch, Rafik Chaabouni, abhi shelat
+Asiacrypt 2008
+*/
+
+package zkproofs
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "math"
+ "math/big"
+ "strconv"
+
+ "../crypto/bn256"
+)
+
+/*
+paramsSet contains elements generated by the verifier, which are necessary for the prover.
+This must be computed in a trusted setup.
+*/
+type paramsSet struct {
+ signatures map[int64]*bn256.G2
+ H *bn256.G2
+ // TODO:must protect the private key
+ kp keypair
+ // u determines the amount of signatures we need in the public params.
+ // Each signature can be compressed to just 1 field element of 256 bits.
+ // Then the parameters have minimum size equal to 256*u bits.
+ // l determines how many pairings we need to compute, then in order to improve
+ // verifier`s performance we want to minize it.
+ // Namely, we have 2*l pairings for the prover and 3*l for the verifier.
+}
+
+/*
+paramsUL contains elements generated by the verifier, which are necessary for the prover.
+This must be computed in a trusted setup.
+*/
+type paramsUL struct {
+ signatures map[string]*bn256.G2
+ H *bn256.G2
+ // TODO:must protect the private key
+ kp keypair
+ // u determines the amount of signatures we need in the public params.
+ // Each signature can be compressed to just 1 field element of 256 bits.
+ // Then the parameters have minimum size equal to 256*u bits.
+ // l determines how many pairings we need to compute, then in order to improve
+ // verifier's performance we want to minimize it.
+ // Namely, we have 2*l pairings for the prover and 3*l for the verifier.
+ u, l int64
+}
+
+/*
+proofSet contains the necessary elements for the ZK Set Membership proof.
+*/
+type proofSet struct {
+ V *bn256.G2
+ D, C *bn256.G2
+ a *bn256.GT
+ s, t, zsig, zv *big.Int
+ c, m, zr *big.Int
+}
+
+/*
+proofUL contains the necessary elements for the ZK proof.
+*/
+type proofUL struct {
+ V []*bn256.G2
+ D, C *bn256.G2
+ a []*bn256.GT
+ s, t, zsig, zv []*big.Int
+ c, m, zr *big.Int
+}
+
+/*
+SetupSet generates the signature for the elements in the set.
+*/
+func SetupSet(s []int64) (paramsSet, error) {
+ var (
+ i int
+ p paramsSet
+ )
+ p.kp, _ = keygen()
+
+ p.signatures = make(map[int64]*bn256.G2)
+ for i = 0; i < len(s); i++ {
+ sig_i, _ := sign(new(big.Int).SetInt64(int64(s[i])), p.kp.privk)
+ p.signatures[s[i]] = sig_i
+ }
+ //TODO: protect the 'master' key
+ h := GetBigInt("18560948149108576432482904553159745978835170526553990798435819795989606410925")
+ p.H = new(bn256.G2).ScalarBaseMult(h)
+ return p, nil
+}
+
+/*
+SetupUL generates the signature for the interval [0,u^l).
+The value of u should be roughly b/log(b), but we can choose smaller values in
+order to get smaller parameters, at the cost of having worse performance.
+*/
+func SetupUL(u, l int64) (paramsUL, error) {
+ var (
+ i int64
+ p paramsUL
+ )
+ p.kp, _ = keygen()
+
+ p.signatures = make(map[string]*bn256.G2)
+ for i = 0; i < u; i++ {
+ sig_i, _ := sign(new(big.Int).SetInt64(i), p.kp.privk)
+ p.signatures[strconv.FormatInt(i, 10)] = sig_i
+ }
+ //TODO: protect the 'master' key
+ h := GetBigInt("18560948149108576432482904553159745978835170526553990798435819795989606410925")
+ p.H = new(bn256.G2).ScalarBaseMult(h)
+ p.u = u
+ p.l = l
+ return p, nil
+}
+
+/*
+ProveSet method is used to produce the ZK Set Membership proof.
+*/
+func ProveSet(x int64, r *big.Int, p paramsSet) (proofSet, error) {
+ var (
+ v *big.Int
+ proof_out proofSet
+ )
+
+ // Initialize variables
+ proof_out.D = new(bn256.G2)
+ proof_out.D.SetInfinity()
+ proof_out.m, _ = rand.Int(rand.Reader, bn256.Order)
+
+ D := new(bn256.G2)
+ v, _ = rand.Int(rand.Reader, bn256.Order)
+ A, ok := p.signatures[x]
+ if ok {
+ // D = g^s.H^m
+ D = new(bn256.G2).ScalarMult(p.H, proof_out.m)
+ proof_out.s, _ = rand.Int(rand.Reader, bn256.Order)
+ aux := new(bn256.G2).ScalarBaseMult(proof_out.s)
+ D.Add(D, aux)
+
+ proof_out.V = new(bn256.G2).ScalarMult(A, v)
+ proof_out.t, _ = rand.Int(rand.Reader, bn256.Order)
+ proof_out.a = bn256.Pair(G1, proof_out.V)
+ proof_out.a.ScalarMult(proof_out.a, proof_out.s)
+ proof_out.a.Invert(proof_out.a)
+ proof_out.a.Add(proof_out.a, new(bn256.GT).ScalarMult(E, proof_out.t))
+ } else {
+ return proof_out, errors.New("Could not generate proof. Element does not belong to the interval.")
+ }
+ proof_out.D.Add(proof_out.D, D)
+
+ // Consider passing C as input,
+ // so that it is possible to delegate the commitment computation to an external party.
+ proof_out.C, _ = Commit(new(big.Int).SetInt64(x), r, p.H)
+ // Fiat-Shamir heuristic
+ proof_out.c, _ = HashSet(proof_out.a, proof_out.D)
+ proof_out.c = Mod(proof_out.c, bn256.Order)
+
+ proof_out.zr = Sub(proof_out.m, Multiply(r, proof_out.c))
+ proof_out.zr = Mod(proof_out.zr, bn256.Order)
+ proof_out.zsig = Sub(proof_out.s, Multiply(new(big.Int).SetInt64(x), proof_out.c))
+ proof_out.zsig = Mod(proof_out.zsig, bn256.Order)
+ proof_out.zv = Sub(proof_out.t, Multiply(v, proof_out.c))
+ proof_out.zv = Mod(proof_out.zv, bn256.Order)
+ return proof_out, nil
+}
+
+/*
+ProveUL method is used to produce the ZKRP proof that secret x belongs to the interval [0,U^L].
+*/
+func ProveUL(x, r *big.Int, p paramsUL) (proofUL, error) {
+ var (
+ i int64
+ v []*big.Int
+ proof_out proofUL
+ )
+ decx, _ := Decompose(x, p.u, p.l)
+
+ // Initialize variables
+ v = make([]*big.Int, p.l, p.l)
+ proof_out.V = make([]*bn256.G2, p.l, p.l)
+ proof_out.a = make([]*bn256.GT, p.l, p.l)
+ proof_out.s = make([]*big.Int, p.l, p.l)
+ proof_out.t = make([]*big.Int, p.l, p.l)
+ proof_out.zsig = make([]*big.Int, p.l, p.l)
+ proof_out.zv = make([]*big.Int, p.l, p.l)
+ proof_out.D = new(bn256.G2)
+ proof_out.D.SetInfinity()
+ proof_out.m, _ = rand.Int(rand.Reader, bn256.Order)
+
+ // D = H^m
+ D := new(bn256.G2).ScalarMult(p.H, proof_out.m)
+ for i = 0; i < p.l; i++ {
+ v[i], _ = rand.Int(rand.Reader, bn256.Order)
+ A, ok := p.signatures[strconv.FormatInt(decx[i], 10)]
+ if ok {
+ proof_out.V[i] = new(bn256.G2).ScalarMult(A, v[i])
+ proof_out.s[i], _ = rand.Int(rand.Reader, bn256.Order)
+ proof_out.t[i], _ = rand.Int(rand.Reader, bn256.Order)
+ proof_out.a[i] = bn256.Pair(G1, proof_out.V[i])
+ proof_out.a[i].ScalarMult(proof_out.a[i], proof_out.s[i])
+ proof_out.a[i].Invert(proof_out.a[i])
+ proof_out.a[i].Add(proof_out.a[i], new(bn256.GT).ScalarMult(E, proof_out.t[i]))
+
+ ui := new(big.Int).Exp(new(big.Int).SetInt64(p.u), new(big.Int).SetInt64(i), nil)
+ muisi := new(big.Int).Mul(proof_out.s[i], ui)
+ muisi = Mod(muisi, bn256.Order)
+ aux := new(bn256.G2).ScalarBaseMult(muisi)
+ D.Add(D, aux)
+ } else {
+ return proof_out, errors.New("Could not generate proof. Element does not belong to the interval.")
+ }
+ }
+ proof_out.D.Add(proof_out.D, D)
+
+ // Consider passing C as input,
+ // so that it is possible to delegate the commitment computation to an external party.
+ proof_out.C, _ = Commit(x, r, p.H)
+ // Fiat-Shamir heuristic
+ proof_out.c, _ = Hash(proof_out.a, proof_out.D)
+ proof_out.c = Mod(proof_out.c, bn256.Order)
+
+ proof_out.zr = Sub(proof_out.m, Multiply(r, proof_out.c))
+ proof_out.zr = Mod(proof_out.zr, bn256.Order)
+ for i = 0; i < p.l; i++ {
+ proof_out.zsig[i] = Sub(proof_out.s[i], Multiply(new(big.Int).SetInt64(decx[i]), proof_out.c))
+ proof_out.zsig[i] = Mod(proof_out.zsig[i], bn256.Order)
+ proof_out.zv[i] = Sub(proof_out.t[i], Multiply(v[i], proof_out.c))
+ proof_out.zv[i] = Mod(proof_out.zv[i], bn256.Order)
+ }
+ return proof_out, nil
+}
+
+/*
+VerifySet is used to validate the ZK Set Membership proof. It returns true iff the proof is valid.
+*/
+func VerifySet(proof_out *proofSet, p *paramsSet) (bool, error) {
+ var (
+ D *bn256.G2
+ r1, r2 bool
+ p1, p2 *bn256.GT
+ )
+ // D == C^c.h^ zr.g^zsig ?
+ D = new(bn256.G2).ScalarMult(proof_out.C, proof_out.c)
+ D.Add(D, new(bn256.G2).ScalarMult(p.H, proof_out.zr))
+ aux := new(bn256.G2).ScalarBaseMult(proof_out.zsig)
+ D.Add(D, aux)
+
+ DBytes := D.Marshal()
+ pDBytes := proof_out.D.Marshal()
+ r1 = bytes.Equal(DBytes, pDBytes)
+
+ r2 = true
+ // a == [e(V,y)^c].[e(V,g)^-zsig].[e(g,g)^zv]
+ p1 = bn256.Pair(p.kp.pubk, proof_out.V)
+ p1.ScalarMult(p1, proof_out.c)
+ p2 = bn256.Pair(G1, proof_out.V)
+ p2.ScalarMult(p2, proof_out.zsig)
+ p2.Invert(p2)
+ p1.Add(p1, p2)
+ p1.Add(p1, new(bn256.GT).ScalarMult(E, proof_out.zv))
+
+ pBytes := p1.Marshal()
+ aBytes := proof_out.a.Marshal()
+ r2 = r2 && bytes.Equal(pBytes, aBytes)
+ return r1 && r2, nil
+}
+
+/*
+VerifyUL is used to validate the ZKRP proof. It returns true iff the proof is valid.
+*/
+func VerifyUL(proof_out *proofUL, p *paramsUL) (bool, error) {
+ var (
+ i int64
+ D *bn256.G2
+ r1, r2 bool
+ p1, p2 *bn256.GT
+ )
+ // D == C^c.h^ zr.g^zsig ?
+ D = new(bn256.G2).ScalarMult(proof_out.C, proof_out.c)
+ D.Add(D, new(bn256.G2).ScalarMult(p.H, proof_out.zr))
+ for i = 0; i < p.l; i++ {
+ ui := new(big.Int).Exp(new(big.Int).SetInt64(p.u), new(big.Int).SetInt64(i), nil)
+ muizsigi := new(big.Int).Mul(proof_out.zsig[i], ui)
+ muizsigi = Mod(muizsigi, bn256.Order)
+ aux := new(bn256.G2).ScalarBaseMult(muizsigi)
+ D.Add(D, aux)
+ }
+
+ DBytes := D.Marshal()
+ pDBytes := proof_out.D.Marshal()
+ r1 = bytes.Equal(DBytes, pDBytes)
+
+ r2 = true
+ for i = 0; i < p.l; i++ {
+ // a == [e(V,y)^c].[e(V,g)^-zsig].[e(g,g)^zv]
+ p1 = bn256.Pair(p.kp.pubk, proof_out.V[i])
+ p1.ScalarMult(p1, proof_out.c)
+ p2 = bn256.Pair(G1, proof_out.V[i])
+ p2.ScalarMult(p2, proof_out.zsig[i])
+ p2.Invert(p2)
+ p1.Add(p1, p2)
+ p1.Add(p1, new(bn256.GT).ScalarMult(E, proof_out.zv[i]))
+
+ pBytes := p1.Marshal()
+ aBytes := proof_out.a[i].Marshal()
+ r2 = r2 && bytes.Equal(pBytes, aBytes)
+ }
+ return r1 && r2, nil
+}
+
+/*
+proof contains the necessary elements for the ZK proof.
+*/
+type proof struct {
+ p1, p2 proofUL
+}
+
+/*
+params contains elements generated by the verifier, which are necessary for the prover.
+This must be computed in a trusted setup.
+*/
+type params struct {
+ p *paramsUL
+ a, b int64
+}
+
+type ccs08 struct {
+ p *params
+ x, r *big.Int
+ proof_out proof
+ pubk *bn256.G1
+}
+
+/*
+Setup receives integers a and b, and configures the parameters for the rangeproof scheme.
+*/
+func (zkrp *ccs08) Setup(a, b int64) error {
+ // Compute optimal values for u and l
+ var (
+  u, l int64
+  logb float64
+  p *params
+ )
+ if a > b {
+  zkrp.p = nil
+  return errors.New("a must be less than or equal to b")
+ }
+ p = new(params)
+ logb = math.Log(float64(b))
+ if logb != 0 {
+  // TODO: understand how to find optimal parameters
+  //u = b / int64(logb)
+  u = 57
+  if u != 0 {
+   l = 0
+   for i := b; i > 0; i = i / u {
+    l = l + 1
+   }
+   params_out, e := SetupUL(u, l)
+   p.p = &params_out // fixed mis-encoding: "&para" had been garbled into the pilcrow character
+   p.a = a
+   p.b = b
+   zkrp.p = p
+   return e
+  } else {
+   zkrp.p = nil
+   return errors.New("u is zero")
+  }
+ } else {
+  zkrp.p = nil
+  return errors.New("log(b) is zero")
+ }
+}
+
+/*
+Prove method is responsible for generating the zero knowledge proof.
+*/
+func (zkrp *ccs08) Prove() error {
+ ul := new(big.Int).Exp(new(big.Int).SetInt64(zkrp.p.p.u), new(big.Int).SetInt64(zkrp.p.p.l), nil)
+
+ // x - b + ul
+ xb := new(big.Int).Sub(zkrp.x, new(big.Int).SetInt64(zkrp.p.b))
+ xb.Add(xb, ul)
+ first, _ := ProveUL(xb, zkrp.r, *zkrp.p.p)
+
+ // x - a
+ xa := new(big.Int).Sub(zkrp.x, new(big.Int).SetInt64(zkrp.p.a))
+ second, _ := ProveUL(xa, zkrp.r, *zkrp.p.p)
+
+ zkrp.proof_out.p1 = first
+ zkrp.proof_out.p2 = second
+ return nil
+}
+
+/*
+Verify is responsible for validating the proof.
+*/
+func (zkrp *ccs08) Verify() (bool, error) {
+ first, _ := VerifyUL(&zkrp.proof_out.p1, zkrp.p.p)
+ second, _ := VerifyUL(&zkrp.proof_out.p2, zkrp.p.p)
+ return first && second, nil
+}
diff --git a/src/ConfidentialTx/zkproofs/ccs08_test.go b/src/ConfidentialTx/zkproofs/ccs08_test.go
new file mode 100644
index 0000000..3b4c08e
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/ccs08_test.go
@@ -0,0 +1,190 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "testing"
+ "math/big"
+ "crypto/rand"
+ "fmt"
+ "github.com/ing-bank/zkproofs/go-ethereum/crypto/bn256"
+ "time"
+)
+
+/*
+Tests decomposition into base-u digits.
+*/
+func TestDecompose(t *testing.T) {
+ h := GetBigInt("925")
+ decx, _ := Decompose(h, 10, 3)
+ if decx[0] != 5 || decx[1] != 2 || decx[2] != 9 {
+ t.Errorf("Assert failure: expected true, actual: %d", decx)
+ }
+}
+
+/*
+Tests Inversion on G1 group.
+*/
+func TestNegScalarBaseMulG1(t *testing.T) {
+ b, _ := rand.Int(rand.Reader, bn256.Order)
+ pb := new(bn256.G1).ScalarBaseMult(b)
+ mb := Sub(new(big.Int).SetInt64(0), b)
+ mpb := new(bn256.G1).ScalarBaseMult(mb)
+ a := new(bn256.G1).Add(pb, mpb)
+ aBytes := a.Marshal()
+ fmt.Println(aBytes)
+ fmt.Println(a)
+ for i := 0; i < len(aBytes)-1; i++ {
+ if aBytes[i] != 0 {
+ t.Errorf("Assert failure: expected true, actual: %t", aBytes[i] == 0)
+ }
+ }
+ if aBytes[len(aBytes)-1] != 1 {
+ t.Errorf("Assert failure: expected true, actual: %t", aBytes[len(aBytes)-1] == 1)
+ }
+}
+
+/*
+Tests Inversion on G2 group.
+*/
+func TestNegScalarBaseMulG2(t *testing.T) {
+ b, _ := rand.Int(rand.Reader, bn256.Order)
+ pb := new(bn256.G2).ScalarBaseMult(b)
+ mb := Sub(new(big.Int).SetInt64(0), b)
+ mpb := new(bn256.G2).ScalarBaseMult(mb)
+ a := new(bn256.G2).Add(pb, mpb)
+ if a.IsZero() != true {
+ t.Errorf("Assert failure: expected true, actual: %t", a.IsZero())
+ }
+}
+
+/*
+Tests Inversion on GFp12 finite field.
+*/
+func TestInvertGFp12(t *testing.T) {
+ b, _ := rand.Int(rand.Reader, bn256.Order)
+ c, _ := rand.Int(rand.Reader, bn256.Order)
+
+ pb, _ := new(bn256.G1).Unmarshal(new(bn256.G1).ScalarBaseMult(b).Marshal())
+ qc, _ := new(bn256.G2).Unmarshal(new(bn256.G2).ScalarBaseMult(c).Marshal())
+
+ k1 := bn256.Pair(pb, qc)
+ k2 := new(bn256.GT).Invert(k1)
+ k3 := new(bn256.GT).Add(k1, k2)
+ if k3.IsOne() != true {
+ t.Errorf("Assert failure: expected true, actual: %t", k3.IsOne())
+ }
+}
+
+/*
+Tests the ZK Range Proof building block, where the interval is [0, U^L).
+*/
+func TestZKRP_UL(t *testing.T) {
+ var (
+ r *big.Int
+ )
+ p, _ := SetupUL(10, 5)
+ r, _ = rand.Int(rand.Reader, bn256.Order)
+ proof_out, _ := ProveUL(new(big.Int).SetInt64(42176), r, p)
+ result, _ := VerifyUL(&proof_out, &p)
+ fmt.Println("ZKRP UL result: ")
+ fmt.Println(result)
+ if result != true {
+ t.Errorf("Assert failure: expected true, actual: %t", result)
+ }
+}
+
+/*
+Tests if the Setup algorithm is rejecting wrong input as expected.
+*/
+func TestZKRPSetupInput(t *testing.T) {
+ var (
+ zkrp ccs08
+ )
+ e := zkrp.Setup(1900, 1899)
+ result := e.Error() != "a must be less than or equal to b"
+ if result {
+ t.Errorf("Assert failure: expected true, actual: %t", result)
+ }
+}
+
+/*
+Tests the ZK Set Membership (CCS08) protocol.
+*/
+func TestZKSet(t *testing.T) {
+ var (
+ r *big.Int
+ s []int64
+ )
+ s = make([]int64, 4)
+ s[0] = 12
+ s[1] = 42
+ s[2] = 61
+ s[3] = 71
+ startTime := time.Now()
+ p, _ := SetupSet(s)
+ setupTime := time.Now()
+ fmt.Println(" ############### Setup time:")
+ fmt.Println(setupTime.Sub(startTime))
+ r, _ = rand.Int(rand.Reader, bn256.Order)
+ proof_out, _ := ProveSet(12, r, p)
+ proofTime := time.Now()
+ fmt.Println("Proof time:")
+ fmt.Println(proofTime.Sub(setupTime))
+ result, _ := VerifySet(&proof_out, &p)
+ verifyTime := time.Now()
+ fmt.Println("Verify time:")
+ fmt.Println(verifyTime.Sub(proofTime))
+ fmt.Println("ZK Set Membership result: ")
+ fmt.Println(result)
+ if result != true {
+ t.Errorf("Assert failure: expected true, actual: %t", result)
+ }
+}
+
+/*
+Tests the entire ZK Range Proof (CCS08) protocol.
+*/
+func TestZKRP(t *testing.T) {
+ var (
+ result bool
+ zkrp ccs08
+ )
+ startTime := time.Now()
+ zkrp.Setup(347184000, 599644800)
+ setupTime := time.Now()
+ fmt.Println(" ############### Setup time:")
+ fmt.Println(setupTime.Sub(startTime))
+ zkrp.x = new(big.Int).SetInt64(419835123)
+ zkrp.r, _ = rand.Int(rand.Reader, bn256.Order)
+ e := zkrp.Prove()
+ proofTime := time.Now()
+ fmt.Println("Proof time:")
+ fmt.Println(proofTime.Sub(setupTime))
+ if e != nil {
+ fmt.Println(e.Error())
+ }
+ result, _ = zkrp.Verify()
+ verifyTime := time.Now()
+ fmt.Println("Verify time:")
+ fmt.Println(verifyTime.Sub(proofTime))
+ fmt.Println("ZKRP result: ")
+ fmt.Println(result)
+ if result != true {
+ t.Errorf("Assert failure: expected true, actual: %t", result)
+ }
+}
diff --git a/src/ConfidentialTx/zkproofs/p256.go b/src/ConfidentialTx/zkproofs/p256.go
new file mode 100644
index 0000000..d80f936
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/p256.go
@@ -0,0 +1,249 @@
+/*
+Encapsulates secp256k1 elliptic curve.
+*/
+
+package zkproofs
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "errors"
+ "math/big"
+ "strconv"
+
+ "../byteconversion"
+ "../crypto/secp256k1"
+)
+
+var (
+ CURVE = secp256k1.S256()
+ GX = CURVE.Gx
+ GY = CURVE.Gy
+)
+
+/*
+Elliptic Curve Point struct.
+*/
+type p256 struct {
+ X, Y *big.Int
+}
+
+/*
+IsZero returns true if and only if the elliptic curve point is the point at infinity.
+*/
+func (p *p256) IsZero() bool {
+ c1 := (p.X == nil || p.Y == nil)
+ if !c1 {
+ z := new(big.Int).SetInt64(0)
+ return p.X.Cmp(z) == 0 && p.Y.Cmp(z) == 0
+ }
+ return true
+}
+
+/*
+Neg returns the inverse of the given elliptic curve point.
+*/
+func (p *p256) Neg(a *p256) *p256 {
+ // (X, Y) -> (X, -Y) = (X, P - Y), computed here as scalar multiplication by N-1
+ if a.IsZero() {
+ return p.SetInfinity()
+ }
+ one := new(big.Int).SetInt64(1)
+ mone := new(big.Int).Sub(CURVE.N, one)
+ p.ScalarMult(p, mone)
+ return p
+}
+
+/*
+Input points must be distinct
+*/
+func (p *p256) Add(a, b *p256) *p256 {
+ if a.IsZero() {
+  p.X = b.X
+  p.Y = b.Y
+  return p
+ } else if b.IsZero() {
+  p.X = a.X // copy-paste bug fix: when b is the identity the result is a (was b.X/b.Y, cf. Multiply)
+  p.Y = a.Y
+  return p
+
+ }
+ resx, resy := CURVE.Add(a.X, a.Y, b.X, b.Y)
+ p.X = resx
+ p.Y = resy
+ return p
+}
+
+/*
+Double returns 2*P, where P is the given elliptic curve point.
+*/
+func (p *p256) Double(a *p256) *p256 {
+ if a.IsZero() {
+ return p.SetInfinity()
+ }
+ resx, resy := CURVE.Double(a.X, a.Y)
+ p.X = resx
+ p.Y = resy
+ return p
+}
+
+/*
+ScalarMul encapsulates the scalar Multiplication Algorithm from secp256k1.
+*/
+func (p *p256) ScalarMult(a *p256, n *big.Int) *p256 {
+ if a.IsZero() {
+ return p.SetInfinity()
+ }
+ cmp := n.Cmp(big.NewInt(0))
+ if cmp == 0 {
+ return p.SetInfinity()
+ }
+ n = Mod(n, CURVE.N)
+ bn := n.Bytes()
+ resx, resy := CURVE.ScalarMult(a.X, a.Y, bn)
+ p.X = resx
+ p.Y = resy
+ return p
+}
+
+/*
+ScalarBaseMult returns the Scalar Multiplication by the base generator.
+*/
+func (p *p256) ScalarBaseMult(n *big.Int) *p256 {
+ cmp := n.Cmp(big.NewInt(0))
+ if cmp == 0 {
+ return p.SetInfinity()
+ }
+ n = Mod(n, CURVE.N)
+ bn := n.Bytes()
+ resx, resy := CURVE.ScalarBaseMult(bn)
+ p.X = resx
+ p.Y = resy
+ return p
+}
+
+/*
+Multiply is actually responsible for the addition of elliptic curve points.
+The name here is to maintain compatibility with bn256 interface.
+This algorithm verifies if the given elliptic curve points are equal, in which case it
+returns the result of Double function, otherwise it returns the result of Add function.
+*/
+func (p *p256) Multiply(a, b *p256) *p256 {
+ if a.IsZero() {
+ p.X = b.X
+ p.Y = b.Y
+ return p
+ } else if b.IsZero() {
+ p.X = a.X
+ p.Y = a.Y
+ return p
+ }
+ if a.X.Cmp(b.X) == 0 && a.Y.Cmp(b.Y) == 0 {
+ resx, resy := CURVE.Double(a.X, a.Y)
+ p.X = resx
+ p.Y = resy
+ return p
+ }
+ resx, resy := CURVE.Add(a.X, a.Y, b.X, b.Y)
+ p.X = resx
+ p.Y = resy
+ return p
+}
+
+/*
+SetInfinity sets the given elliptic curve point to the point at infinity.
+*/
+func (p *p256) SetInfinity() *p256 {
+ p.X = nil
+ p.Y = nil
+ return p
+}
+
+/*
+String returns the readable representation of the given elliptic curve point, i.e.
+the tuple formed by X and Y coordinates.
+*/
+func (p *p256) String() string {
+ return "p256(" + p.X.String() + "," + p.Y.String() + ")"
+}
+
+/*
+MapToGroup is a hash function that returns a valid elliptic curve point given as
+input a string. It is also known as hash-to-point and is used to obtain a generator
+that has no discrete logarithm known relation, thus addressing the concept of
+NUMS (nothing up my sleeve).
+This implementation is based on the paper:
+Short signatures from the Weil pairing
+Boneh, Lynn and Shacham
+Journal of Cryptology, September 2004, Volume 17, Issue 4, pp 297–319
+*/
+func MapToGroup(m string) (*p256, error) {
+ var (
+ i int
+ buffer bytes.Buffer
+ )
+ i = 0
+ for i < 256 {
+ buffer.Reset()
+ buffer.WriteString(strconv.Itoa(i))
+ buffer.WriteString(m)
+ x, _ := HashToInt(buffer)
+ x = Mod(x, CURVE.P)
+ fx, _ := F(x)
+ fx = Mod(fx, CURVE.P)
+ y := fx.ModSqrt(fx, CURVE.P)
+ if y != nil {
+ p := &p256{X: x, Y: y}
+ if p.IsOnCurve() && !p.IsZero() {
+ return p, nil
+ }
+ }
+ i = i + 1
+ }
+ return nil, errors.New("Failed to Hash-to-point.")
+}
+
+/*
+F receives a big integer x as input and returns x^3 + 7 mod P, where P is the secp256k1 field prime.
+*/
+func F(x *big.Int) (*big.Int, error) {
+ // Compute x^2
+ x3p7 := Multiply(x, x)
+ x3p7 = Mod(x3p7, CURVE.P)
+ // Compute x^3
+ x3p7 = Multiply(x3p7, x)
+ x3p7 = Mod(x3p7, CURVE.P)
+ // Compute X^3 + 7
+ x3p7 = Add(x3p7, new(big.Int).SetInt64(7))
+ x3p7 = Mod(x3p7, CURVE.P)
+ return x3p7, nil
+}
+
+/*
+HashToInt is responsible for computing a Zp element from the given input buffer.
+*/
+func HashToInt(b bytes.Buffer) (*big.Int, error) {
+ digest := sha256.New()
+ digest.Write(b.Bytes())
+ output := digest.Sum(nil)
+ tmp := output[0:len(output)]
+ return byteconversion.FromByteArray(tmp)
+}
+
+/*
+IsOnCurve returns TRUE if and only if p has coordinates X and Y that satisfy the
+Elliptic Curve equation: y^2 = x^3 + 7.
+*/
+func (p *p256) IsOnCurve() bool {
+ // y² = x³ + 7
+ y2 := new(big.Int).Mul(p.Y, p.Y)
+ y2.Mod(y2, CURVE.P)
+
+ x3 := new(big.Int).Mul(p.X, p.X)
+ x3.Mul(x3, p.X)
+
+ x3.Add(x3, new(big.Int).SetInt64(7))
+ x3.Mod(x3, CURVE.P)
+
+ return x3.Cmp(y2) == 0
+}
diff --git a/src/ConfidentialTx/zkproofs/p256_test.go b/src/ConfidentialTx/zkproofs/p256_test.go
new file mode 100644
index 0000000..8290f8d
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/p256_test.go
@@ -0,0 +1,82 @@
+
+package zkproofs
+
+import (
+ "crypto/rand"
+ "testing"
+ "math/big"
+ "github.com/ing-bank/zkproofs/go-ethereum/crypto/secp256k1"
+)
+
+const TestCount = 1000
+
+func TestIsZero(t *testing.T) {
+ curve := secp256k1.S256()
+ a := make([]byte, 32)
+ a = curve.N.Bytes()
+ Ax, Ay := curve.ScalarBaseMult(a)
+ p1 := p256{X:Ax, Y:Ay}
+ res := p1.IsZero()
+ if res != true {
+ t.Errorf("Assert failure: expected true, actual: %t", res)
+ }
+}
+
+func TestAdd(t * testing.T) {
+ curve := secp256k1.S256()
+ a1 := new(big.Int).SetInt64(71).Bytes()
+ A1x, A1y := curve.ScalarBaseMult(a1)
+ p1 := &p256{X:A1x, Y:A1y}
+ a2 := new(big.Int).SetInt64(17).Bytes()
+ A2x, A2y := curve.ScalarBaseMult(a2)
+ p2 := &p256{X:A2x, Y:A2y}
+ p3 := p1.Add(p1, p2)
+ sa := new(big.Int).SetInt64(-88).Bytes()
+ sAx, sAy := curve.ScalarBaseMult(sa)
+ sp := &p256{X:sAx, Y:sAy}
+ p4 := p3.Add(p3, sp)
+ res := p4.IsZero()
+ if res != true {
+ t.Errorf("Assert failure: expected true, actual: %t", res)
+ }
+}
+
+func TestScalarMultp256(t *testing.T) {
+ curve := secp256k1.S256()
+ a1 := new(big.Int).SetInt64(71).Bytes()
+ Ax, Ay := curve.ScalarBaseMult(a1)
+ p1 := &p256{X:Ax, Y:Ay}
+ pr := p1.ScalarMult(p1, curve.N)
+ res := pr.IsZero()
+ if res != true {
+ t.Errorf("Assert failure: expected true, actual: %t", res)
+ }
+}
+
+func TestScalarBaseMult(t *testing.T) {
+ a1 := new(big.Int).SetInt64(71)
+ p1 := new(p256).ScalarBaseMult(a1)
+ res := p1.IsZero()
+ if res != false {
+ t.Errorf("Assert failure: expected false, actual: %t", res)
+ }
+}
+
+func TestMapToGroup(t *testing.T) {
+ curve := secp256k1.S256()
+ m := "Testing Hash-to-point function:"
+ p, _ := MapToGroup(m)
+ p.ScalarMult(p, curve.N)
+}
+
+func BenchmarkScalarMultp256(b *testing.B) {
+ a := make([]byte, 32)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ rand.Read(a)
+ _ = new(p256).ScalarBaseMult(new(big.Int).SetBytes(a))
+ }
+}
+
+
+
diff --git a/src/ConfidentialTx/zkproofs/util.go b/src/ConfidentialTx/zkproofs/util.go
new file mode 100644
index 0000000..1a644b8
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/util.go
@@ -0,0 +1,117 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+
+import (
+ "crypto/sha256"
+ "math/big"
+
+ "../byteconversion"
+ "../crypto/bn256"
+)
+
+//Constants that are going to be used frequently, then we just need to compute them once.
+var (
+ G1 = new(bn256.G1).ScalarBaseMult(new(big.Int).SetInt64(1))
+ G2 = new(bn256.G2).ScalarBaseMult(new(big.Int).SetInt64(1))
+ E = bn256.Pair(G1, G2)
+)
+
+/*
+Decompose receives as input a bigint x and outputs an array of integers such that
+x = sum(xi.u^i), i.e. it returns the decomposition of x into base u.
+*/
+func Decompose(x *big.Int, u int64, l int64) ([]int64, error) {
+ var (
+ result []int64
+ i int64
+ )
+ result = make([]int64, l, l)
+ i = 0
+ for i < l {
+ result[i] = Mod(x, new(big.Int).SetInt64(u)).Int64()
+ x = new(big.Int).Div(x, new(big.Int).SetInt64(u))
+ i = i + 1
+ }
+ return result, nil
+}
+
+/*
+Commit method corresponds to the Pedersen commitment scheme. Namely, given input
+message x, and randomness r, it outputs g^x.h^r.
+*/
+func Commit(x, r *big.Int, h *bn256.G2) (*bn256.G2, error) {
+ var (
+ C *bn256.G2
+ )
+ C = new(bn256.G2).ScalarBaseMult(x)
+ C.Add(C, new(bn256.G2).ScalarMult(h, r))
+ return C, nil
+}
+
+/*
+CommitG1 method corresponds to the Pedersen commitment scheme. Namely, given input
+message x, and randomness r, it outputs g^x.h^r.
+*/
+func CommitG1(x, r *big.Int, h *p256) (*p256, error) {
+ var (
+ C *p256
+ )
+ C = new(p256).ScalarBaseMult(x)
+ Hr := new(p256).ScalarMult(h, r)
+ C.Add(C, Hr)
+ return C, nil
+}
+
+func Mult(a *p256, n *big.Int) *p256 {
+ return new(p256).ScalarMult(a, n)
+}
+
+/*
+HashSet is responsible for computing a Zp element given elements from GT and G2.
+*/
+func HashSet(a *bn256.GT, D *bn256.G2) (*big.Int, error) {
+ digest := sha256.New()
+ digest.Write([]byte(a.String()))
+ digest.Write([]byte(D.String()))
+ output := digest.Sum(nil)
+ tmp := output[0:len(output)]
+ return byteconversion.FromByteArray(tmp)
+}
+
+/*
+Hash is responsible for computing a Zp element given elements from GT and G2.
+*/
+func Hash(a []*bn256.GT, D *bn256.G2) (*big.Int, error) {
+ digest := sha256.New()
+ for i := range a {
+ digest.Write([]byte(a[i].String()))
+ }
+ digest.Write([]byte(D.String()))
+ output := digest.Sum(nil)
+ tmp := output[0:len(output)]
+ return byteconversion.FromByteArray(tmp)
+}
+
+/*
+Read big integer in base 10 from string.
+*/
+func GetBigInt(value string) *big.Int {
+ i := new(big.Int)
+ i.SetString(value, 10)
+ return i
+}
diff --git a/src/ConfidentialTx/zkproofs/util_test.go b/src/ConfidentialTx/zkproofs/util_test.go
new file mode 100644
index 0000000..b225ca1
--- /dev/null
+++ b/src/ConfidentialTx/zkproofs/util_test.go
@@ -0,0 +1,18 @@
+// Copyright 2018 ING Bank N.V.
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package zkproofs
+