jl777
9 years ago
83 changed files with 16764 additions and 36 deletions
@@ -0,0 +1,51 @@
#define USE_BASIC_CONFIG 1
#define USE_NUM_GMP 1
#define USE_FIELD_10X26 1

#define USE_NUM_NONE 1
#define USE_FIELD_INV_BUILTIN 1
#define USE_SCALAR_INV_BUILTIN 1
#include "secp256k1/libsecp256k1-config.h"

#include "secp256k1/secp256k1.h"

#include "secp256k1/util.h"
#include "secp256k1/scalar.h"
#include "secp256k1/scalar_4x64.h"
#include "secp256k1/scalar_8x32.h"
#include "secp256k1/scalar_impl.h"
#include "secp256k1/scalar_4x64_impl.h"
#include "secp256k1/scalar_8x32_impl.h"
#include "secp256k1/group.h"
#include "secp256k1/group_impl.h"
#include "secp256k1/num_gmp.h"
#include "secp256k1/num_gmp_impl.h"
#include "secp256k1/ecdsa.h"
#include "secp256k1/ecdsa_impl.h"
#include "secp256k1/eckey.h"
#include "secp256k1/eckey_impl.h"
#include "secp256k1/ecmult.h"
#include "secp256k1/ecmult_impl.h"
#include "secp256k1/ecmult_const.h"
#include "secp256k1/ecmult_const_impl.h"
#include "secp256k1/ecmult_gen.h"
#include "secp256k1/ecmult_gen_impl.h"
#include "secp256k1/num.h"
#include "secp256k1/num_impl.h"
#include "secp256k1/field_10x26.h"
#include "secp256k1/field_10x26_impl.h"
#include "secp256k1/field_5x52.h"
#include "secp256k1/field_5x52_impl.h"
#include "secp256k1/field_5x52_int128_impl.h"
#include "secp256k1/field_5x52_asm_impl.h"
#include "secp256k1/java/org_bitcoin_NativeSecp256k1.h"
#include "secp256k1/testrand.h"
#include "secp256k1/testrand_impl.h"
#include "secp256k1/hash.h"
#include "secp256k1/hash_impl.h"
#include "secp256k1/field.h"
#include "secp256k1/field_impl.h"
#include "secp256k1/bench.h"

#include "secp256k1/secp256k1.c"
@@ -0,0 +1,60 @@
package org.bitcoin;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

import com.google.common.base.Preconditions;


/**
 * This class holds native methods to handle ECDSA verification.
 * You can find an example library that can be used for this at
 * https://github.com/sipa/secp256k1
 */
public class NativeSecp256k1 {
    public static final boolean enabled;
    static {
        boolean isEnabled = true;
        try {
            System.loadLibrary("javasecp256k1");
        } catch (UnsatisfiedLinkError e) {
            isEnabled = false;
        }
        enabled = isEnabled;
    }

    private static ThreadLocal<ByteBuffer> nativeECDSABuffer = new ThreadLocal<ByteBuffer>();
    /**
     * Verifies the given secp256k1 signature in native code.
     * Calling this when enabled == false is undefined (the native library is probably not loaded).
     *
     * @param data The data which was signed, must be exactly 32 bytes
     * @param signature The signature
     * @param pub The public key which did the signing
     */
    public static boolean verify(byte[] data, byte[] signature, byte[] pub) {
        Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520);

        ByteBuffer byteBuff = nativeECDSABuffer.get();
        if (byteBuff == null) {
            byteBuff = ByteBuffer.allocateDirect(32 + 8 + 520 + 520);
            byteBuff.order(ByteOrder.nativeOrder());
            nativeECDSABuffer.set(byteBuff);
        }
        byteBuff.rewind();
        byteBuff.put(data);
        byteBuff.putInt(signature.length);
        byteBuff.putInt(pub.length);
        byteBuff.put(signature);
        byteBuff.put(pub);
        return secp256k1_ecdsa_verify(byteBuff) == 1;
    }

    /**
     * @param byteBuff signature format is byte[32] data,
     *                 native-endian int signatureLength, native-endian int pubkeyLength,
     *                 byte[signatureLength] signature, byte[pubkeyLength] pub
     * @return 1 for a valid signature, anything else for an invalid one
     */
    private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff);
}
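The javadoc above fixes the layout of the direct buffer handed to the native method: byte[32] message hash, two native-endian int32 lengths, then the DER signature and the serialized public key packed back to back. The JNI implementation itself is not part of this commit; the following C sketch only illustrates how such a buffer could be unpacked on the native side (the function name and the surrounding verification call are assumptions, not code from this diff):

```c
#include <string.h>
#include <stdint.h>

/* Hypothetical helper: split the ByteBuffer layout documented in
 * NativeSecp256k1 into its five fields. Returns 0 if the lengths are
 * out of range, mirroring the Preconditions check on the Java side. */
static int parse_verify_buffer(const unsigned char *buf,
                               const unsigned char **msg32,
                               const unsigned char **sig, int32_t *siglen,
                               const unsigned char **pub, int32_t *publen) {
    *msg32 = buf;                              /* first 32 bytes: signed hash */
    memcpy(siglen, buf + 32, sizeof(int32_t)); /* native-endian, matches    */
    memcpy(publen, buf + 36, sizeof(int32_t)); /* ByteOrder.nativeOrder()   */
    if (*siglen < 0 || *siglen > 520 || *publen < 0 || *publen > 520) {
        return 0;
    }
    *sig = buf + 40;                           /* DER signature              */
    *pub = buf + 40 + *siglen;                 /* serialized public key      */
    return 1;
}
```

Reusing a thread-local direct ByteBuffer on the Java side keeps the call allocation-free and lets the native code read the arguments without copying them across the JNI boundary.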
@@ -0,0 +1,32 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_BASIC_CONFIG_
#define _SECP256K1_BASIC_CONFIG_

#ifdef USE_BASIC_CONFIG

#undef USE_ASM_X86_64
#undef USE_ENDOMORPHISM
#undef USE_FIELD_10X26
#undef USE_FIELD_5X52
#undef USE_FIELD_INV_BUILTIN
#undef USE_FIELD_INV_NUM
#undef USE_NUM_GMP
#undef USE_NUM_NONE
#undef USE_SCALAR_4X64
#undef USE_SCALAR_8X32
#undef USE_SCALAR_INV_BUILTIN
#undef USE_SCALAR_INV_NUM

#define USE_NUM_NONE 1
#define USE_FIELD_INV_BUILTIN 1
#define USE_SCALAR_INV_BUILTIN 1
#define USE_FIELD_10X26 1
#define USE_SCALAR_8X32 1

#endif // USE_BASIC_CONFIG

#endif // _SECP256K1_BASIC_CONFIG_
@@ -0,0 +1,56 @@
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_BENCH_H_
#define _SECP256K1_BENCH_H_

#include <stdio.h>
#include <math.h>
#include "sys/time.h"

static double gettimedouble(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_usec * 0.000001 + tv.tv_sec;
}

void print_number(double x) {
    double y = x;
    int c = 0;
    if (y < 0.0) y = -y;
    while (y < 100.0) {
        y *= 10.0;
        c++;
    }
    printf("%.*f", c, x);
}

void run_benchmark(char *name, void (*benchmark)(void*), void (*setup)(void*), void (*teardown)(void*), void* data, int count, int iter) {
    int i;
    double min = HUGE_VAL;
    double sum = 0.0;
    double max = 0.0;
    for (i = 0; i < count; i++) {
        double begin, total;
        if (setup) setup(data);
        begin = gettimedouble();
        benchmark(data);
        total = gettimedouble() - begin;
        if (teardown) teardown(data);
        if (total < min) min = total;
        if (total > max) max = total;
        sum += total;
    }
    printf("%s: min ", name);
    print_number(min * 1000000.0 / iter);
    printf("us / avg ");
    print_number((sum / count) * 1000000.0 / iter);
    printf("us / max ");
    print_number(max * 1000000.0 / iter);
    printf("us\n");
}

#endif
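run_benchmark() runs an optional setup, the benchmark body, and an optional teardown `count` times and prints the min/avg/max wall-clock time per iteration; every bench_*.c file in this commit follows the same pattern, as the files below show. For orientation only, a minimal hypothetical harness (the demo_* names are not part of this diff) would look like this:

```c
#include "bench.h"

typedef struct { unsigned int x; } demo_t;

static void demo_setup(void* arg) {
    ((demo_t*)arg)->x = 1;
}

/* The benchmark callback performs the measured work `iter` times itself;
 * run_benchmark() divides the elapsed time by `iter` when printing. */
static void demo_bench(void* arg) {
    int i;
    demo_t *data = (demo_t*)arg;
    for (i = 0; i < 20000; i++) {
        data->x = data->x * 1103515245u + 12345u;
    }
}

int main(void) {
    demo_t data;
    run_benchmark("demo_lcg", demo_bench, demo_setup, NULL, &data, 10, 20000);
    return 0;
}
```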
@@ -0,0 +1,51 @@
/**********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#include <string.h>

#include "include/secp256k1.h"
#include "include/secp256k1_ecdh.h"
#include "util.h"
#include "bench.h"

typedef struct {
    secp256k1_context_t *ctx;
    secp256k1_pubkey_t point;
    unsigned char scalar[32];
} bench_ecdh_t;

static void bench_ecdh_setup(void* arg) {
    int i;
    bench_ecdh_t *data = (bench_ecdh_t*)arg;
    const unsigned char point[] = {
        0x03,
        0x54, 0x94, 0xc1, 0x5d, 0x32, 0x09, 0x97, 0x06,
        0xc2, 0x39, 0x5f, 0x94, 0x34, 0x87, 0x45, 0xfd,
        0x75, 0x7c, 0xe3, 0x0e, 0x4e, 0x8c, 0x90, 0xfb,
        0xa2, 0xba, 0xd1, 0x84, 0xf8, 0x83, 0xc6, 0x9f
    };

    data->ctx = secp256k1_context_create(0);
    for (i = 0; i < 32; i++) data->scalar[i] = i + 1;
    CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
}

static void bench_ecdh(void* arg) {
    int i;
    unsigned char res[32];
    bench_ecdh_t *data = (bench_ecdh_t*)arg;

    for (i = 0; i < 20000; i++) {
        CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar) == 1);
    }
}

int main(void) {
    bench_ecdh_t data;

    run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, 20000);
    return 0;
}
@@ -0,0 +1,331 @@
/**********************************************************************
 * Copyright (c) 2014-2015 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#include <stdio.h> |
|||
|
|||
#include "include/secp256k1.h" |
|||
|
|||
#include "util.h" |
|||
#include "hash_impl.h" |
|||
#include "num_impl.h" |
|||
#include "field_impl.h" |
|||
#include "group_impl.h" |
|||
#include "scalar_impl.h" |
|||
#include "ecmult_const_impl.h" |
|||
#include "ecmult_impl.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_scalar_t scalar_x, scalar_y; |
|||
secp256k1_fe_t fe_x, fe_y; |
|||
secp256k1_ge_t ge_x, ge_y; |
|||
secp256k1_gej_t gej_x, gej_y; |
|||
unsigned char data[64]; |
|||
int wnaf[256]; |
|||
} bench_inv_t; |
|||
|
|||
void bench_setup(void* arg) { |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
static const unsigned char init_x[32] = { |
|||
0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13, |
|||
0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35, |
|||
0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59, |
|||
0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83 |
|||
}; |
|||
|
|||
static const unsigned char init_y[32] = { |
|||
0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83, |
|||
0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5, |
|||
0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9, |
|||
0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3 |
|||
}; |
|||
|
|||
secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL); |
|||
secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL); |
|||
secp256k1_fe_set_b32(&data->fe_x, init_x); |
|||
secp256k1_fe_set_b32(&data->fe_y, init_y); |
|||
CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0)); |
|||
CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1)); |
|||
secp256k1_gej_set_ge(&data->gej_x, &data->ge_x); |
|||
secp256k1_gej_set_ge(&data->gej_y, &data->ge_y); |
|||
memcpy(data->data, init_x, 32); |
|||
memcpy(data->data + 32, init_y, 32); |
|||
} |
|||
|
|||
void bench_scalar_add(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000000; i++) { |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
void bench_scalar_negate(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000000; i++) { |
|||
secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x); |
|||
} |
|||
} |
|||
|
|||
void bench_scalar_sqr(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x); |
|||
} |
|||
} |
|||
|
|||
void bench_scalar_mul(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
void bench_scalar_split(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_scalar_t l, r; |
|||
secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x); |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
#endif |
|||
|
|||
void bench_scalar_inverse(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000; i++) { |
|||
secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x); |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
void bench_scalar_inverse_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000; i++) { |
|||
secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x); |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
void bench_field_normalize(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000000; i++) { |
|||
secp256k1_fe_normalize(&data->fe_x); |
|||
} |
|||
} |
|||
|
|||
void bench_field_normalize_weak(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 2000000; i++) { |
|||
secp256k1_fe_normalize_weak(&data->fe_x); |
|||
} |
|||
} |
|||
|
|||
void bench_field_mul(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y); |
|||
} |
|||
} |
|||
|
|||
void bench_field_sqr(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_fe_sqr(&data->fe_x, &data->fe_x); |
|||
} |
|||
} |
|||
|
|||
void bench_field_inverse(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_fe_inv(&data->fe_x, &data->fe_x); |
|||
secp256k1_fe_add(&data->fe_x, &data->fe_y); |
|||
} |
|||
} |
|||
|
|||
void bench_field_inverse_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_fe_inv_var(&data->fe_x, &data->fe_x); |
|||
secp256k1_fe_add(&data->fe_x, &data->fe_y); |
|||
} |
|||
} |
|||
|
|||
void bench_field_sqrt_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_fe_sqrt_var(&data->fe_x, &data->fe_x); |
|||
secp256k1_fe_add(&data->fe_x, &data->fe_y); |
|||
} |
|||
} |
|||
|
|||
void bench_group_double_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL); |
|||
} |
|||
} |
|||
|
|||
void bench_group_add_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL); |
|||
} |
|||
} |
|||
|
|||
void bench_group_add_affine(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y); |
|||
} |
|||
} |
|||
|
|||
void bench_group_add_affine_var(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 200000; i++) { |
|||
secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL); |
|||
} |
|||
} |
|||
|
|||
void bench_ecmult_wnaf(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A); |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
void bench_wnaf_const(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A); |
|||
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); |
|||
} |
|||
} |
|||
|
|||
|
|||
void bench_sha256(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
secp256k1_sha256_t sha; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_sha256_initialize(&sha); |
|||
secp256k1_sha256_write(&sha, data->data, 32); |
|||
secp256k1_sha256_finalize(&sha, data->data); |
|||
} |
|||
} |
|||
|
|||
void bench_hmac_sha256(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
secp256k1_hmac_sha256_t hmac; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_hmac_sha256_initialize(&hmac, data->data, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, data->data, 32); |
|||
secp256k1_hmac_sha256_finalize(&hmac, data->data); |
|||
} |
|||
} |
|||
|
|||
void bench_rfc6979_hmac_sha256(void* arg) { |
|||
int i; |
|||
bench_inv_t *data = (bench_inv_t*)arg; |
|||
secp256k1_rfc6979_hmac_sha256_t rng; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32); |
|||
} |
|||
} |
|||
|
|||
|
|||
int have_flag(int argc, char** argv, char *flag) { |
|||
char** argm = argv + argc; |
|||
argv++; |
|||
if (argv == argm) { |
|||
return 1; |
|||
} |
|||
while (argv != NULL && argv != argm) { |
|||
if (strcmp(*argv, flag) == 0) return 1; |
|||
argv++; |
|||
} |
|||
return 0; |
|||
} |
|||
|
|||
int main(int argc, char **argv) { |
|||
bench_inv_t data; |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, 2000000); |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, 2000000); |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, 200000); |
|||
#ifdef USE_ENDOMORPHISM |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, 20000); |
|||
#endif |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); |
|||
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); |
|||
|
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, 2000000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, 2000000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000); |
|||
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt_var", bench_field_sqrt_var, bench_setup, NULL, &data, 10, 20000); |
|||
|
|||
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000); |
|||
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000); |
|||
|
|||
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000); |
|||
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000); |
|||
|
|||
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, 20000); |
|||
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, 20000); |
|||
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, 20000); |
|||
return 0; |
|||
} |
@@ -0,0 +1,65 @@
/**********************************************************************
 * Copyright (c) 2014, 2015 Pieter Wuille, Gregory Maxwell            *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#include <stdint.h> |
|||
|
|||
#include "include/secp256k1_rangeproof.h" |
|||
#include "util.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_context_t* ctx; |
|||
unsigned char commit[33]; |
|||
unsigned char proof[5134]; |
|||
unsigned char blind[32]; |
|||
int len; |
|||
int min_bits; |
|||
uint64_t v; |
|||
} bench_rangeproof_t; |
|||
|
|||
static void bench_rangeproof_setup(void* arg) { |
|||
int i; |
|||
uint64_t minv; |
|||
uint64_t maxv; |
|||
bench_rangeproof_t *data = (bench_rangeproof_t*)arg; |
|||
|
|||
data->v = 0; |
|||
for (i = 0; i < 32; i++) data->blind[i] = i + 1; |
|||
CHECK(secp256k1_pedersen_commit(data->ctx, data->commit, data->blind, data->v)); |
|||
data->len = 5134; |
|||
CHECK(secp256k1_rangeproof_sign(data->ctx, data->proof, &data->len, 0, data->commit, data->blind, data->commit, 0, data->min_bits, data->v)); |
|||
CHECK(secp256k1_rangeproof_verify(data->ctx, &minv, &maxv, data->commit, data->proof, data->len)); |
|||
} |
|||
|
|||
static void bench_rangeproof(void* arg) { |
|||
int i; |
|||
bench_rangeproof_t *data = (bench_rangeproof_t*)arg; |
|||
|
|||
for (i = 0; i < 1000; i++) { |
|||
int j; |
|||
uint64_t minv; |
|||
uint64_t maxv; |
|||
j = secp256k1_rangeproof_verify(data->ctx, &minv, &maxv, data->commit, data->proof, data->len); |
|||
for (j = 0; j < 4; j++) { |
|||
data->proof[j + 2 + 32 *((data->min_bits + 1) >> 1) - 4] = (i >> 8)&255; |
|||
} |
|||
} |
|||
} |
|||
|
|||
int main(void) { |
|||
bench_rangeproof_t data; |
|||
|
|||
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); |
|||
secp256k1_pedersen_context_initialize(data.ctx); |
|||
secp256k1_rangeproof_context_initialize(data.ctx); |
|||
|
|||
data.min_bits = 32; |
|||
|
|||
run_benchmark("rangeproof_verify_bit", bench_rangeproof, bench_rangeproof_setup, NULL, &data, 10, 1000 * data.min_bits); |
|||
|
|||
secp256k1_context_destroy(data.ctx); |
|||
return 0; |
|||
} |
@@ -0,0 +1,55 @@
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#include "include/secp256k1.h" |
|||
#include "util.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_context_t *ctx; |
|||
unsigned char msg[32]; |
|||
unsigned char sig[64]; |
|||
} bench_recover_t; |
|||
|
|||
void bench_recover(void* arg) { |
|||
int i; |
|||
bench_recover_t *data = (bench_recover_t*)arg; |
|||
secp256k1_pubkey_t pubkey; |
|||
unsigned char pubkeyc[33]; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
int j; |
|||
int pubkeylen = 33; |
|||
secp256k1_ecdsa_signature_t sig; |
|||
CHECK(secp256k1_ecdsa_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); |
|||
CHECK(secp256k1_ecdsa_recover(data->ctx, data->msg, &sig, &pubkey)); |
|||
CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, 1)); |
|||
for (j = 0; j < 32; j++) { |
|||
data->sig[j + 32] = data->msg[j]; /* Move former message to S. */ |
|||
data->msg[j] = data->sig[j]; /* Move former R to message. */ |
|||
data->sig[j] = pubkeyc[j + 1]; /* Move recovered pubkey X coordinate to R (which must be a valid X coordinate). */ |
|||
} |
|||
} |
|||
} |
|||
|
|||
void bench_recover_setup(void* arg) { |
|||
int i; |
|||
bench_recover_t *data = (bench_recover_t*)arg; |
|||
|
|||
for (i = 0; i < 32; i++) data->msg[i] = 1 + i; |
|||
for (i = 0; i < 64; i++) data->sig[i] = 65 + i; |
|||
} |
|||
|
|||
int main(void) { |
|||
bench_recover_t data; |
|||
|
|||
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); |
|||
|
|||
run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000); |
|||
|
|||
secp256k1_context_destroy(data.ctx); |
|||
return 0; |
|||
} |
@@ -0,0 +1,69 @@
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#include <stdio.h> |
|||
#include <string.h> |
|||
|
|||
#include "include/secp256k1.h" |
|||
#include "include/secp256k1_schnorr.h" |
|||
#include "util.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
unsigned char key[32]; |
|||
unsigned char sig[64]; |
|||
unsigned char pubkey[33]; |
|||
int pubkeylen; |
|||
} benchmark_schnorr_sig_t; |
|||
|
|||
typedef struct { |
|||
secp256k1_context_t *ctx; |
|||
unsigned char msg[32]; |
|||
benchmark_schnorr_sig_t sigs[64]; |
|||
int numsigs; |
|||
} benchmark_schnorr_verify_t; |
|||
|
|||
static void benchmark_schnorr_init(void* arg) { |
|||
int i, k; |
|||
benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg; |
|||
|
|||
for (i = 0; i < 32; i++) data->msg[i] = 1 + i; |
|||
for (k = 0; k < data->numsigs; k++) { |
|||
secp256k1_pubkey_t pubkey; |
|||
for (i = 0; i < 32; i++) data->sigs[k].key[i] = 33 + i + k; |
|||
secp256k1_schnorr_sign(data->ctx, data->msg, data->sigs[k].sig, data->sigs[k].key, NULL, NULL); |
|||
data->sigs[k].pubkeylen = 33; |
|||
CHECK(secp256k1_ec_pubkey_create(data->ctx, &pubkey, data->sigs[k].key)); |
|||
CHECK(secp256k1_ec_pubkey_serialize(data->ctx, data->sigs[k].pubkey, &data->sigs[k].pubkeylen, &pubkey, 1)); |
|||
} |
|||
} |
|||
|
|||
static void benchmark_schnorr_verify(void* arg) { |
|||
int i; |
|||
benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg; |
|||
|
|||
for (i = 0; i < 20000 / data->numsigs; i++) { |
|||
secp256k1_pubkey_t pubkey; |
|||
data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF); |
|||
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->sigs[0].pubkey, data->sigs[0].pubkeylen)); |
|||
CHECK(secp256k1_schnorr_verify(data->ctx, data->msg, data->sigs[0].sig, &pubkey) == ((i & 0xFF) == 0)); |
|||
data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF); |
|||
} |
|||
} |
|||
|
|||
|
|||
|
|||
int main(void) { |
|||
benchmark_schnorr_verify_t data; |
|||
|
|||
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); |
|||
|
|||
data.numsigs = 1; |
|||
run_benchmark("schnorr_verify", benchmark_schnorr_verify, benchmark_schnorr_init, NULL, &data, 10, 20000); |
|||
|
|||
secp256k1_context_destroy(data.ctx); |
|||
return 0; |
|||
} |
@@ -0,0 +1,52 @@
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#include "include/secp256k1.h" |
|||
#include "util.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_context_t* ctx; |
|||
unsigned char msg[32]; |
|||
unsigned char key[32]; |
|||
} bench_sign_t; |
|||
|
|||
static void bench_sign_setup(void* arg) { |
|||
int i; |
|||
bench_sign_t *data = (bench_sign_t*)arg; |
|||
|
|||
for (i = 0; i < 32; i++) data->msg[i] = i + 1; |
|||
for (i = 0; i < 32; i++) data->key[i] = i + 65; |
|||
} |
|||
|
|||
static void bench_sign(void* arg) { |
|||
int i; |
|||
bench_sign_t *data = (bench_sign_t*)arg; |
|||
|
|||
unsigned char sig[64]; |
|||
for (i = 0; i < 20000; i++) { |
|||
int j; |
|||
int recid = 0; |
|||
secp256k1_ecdsa_signature_t signature; |
|||
CHECK(secp256k1_ecdsa_sign(data->ctx, data->msg, &signature, data->key, NULL, NULL)); |
|||
CHECK(secp256k1_ecdsa_signature_serialize_compact(data->ctx, sig, &recid, &signature)); |
|||
for (j = 0; j < 32; j++) { |
|||
data->msg[j] = sig[j]; /* Move former R to message. */ |
|||
data->key[j] = sig[j + 32]; /* Move former S to key. */ |
|||
} |
|||
} |
|||
} |
|||
|
|||
int main(void) { |
|||
bench_sign_t data; |
|||
|
|||
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); |
|||
|
|||
run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, NULL, &data, 10, 20000); |
|||
|
|||
secp256k1_context_destroy(data.ctx); |
|||
return 0; |
|||
} |
@@ -0,0 +1,63 @@
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#include <stdio.h> |
|||
#include <string.h> |
|||
|
|||
#include "include/secp256k1.h" |
|||
#include "util.h" |
|||
#include "bench.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_context_t *ctx; |
|||
unsigned char msg[32]; |
|||
unsigned char key[32]; |
|||
unsigned char sig[72]; |
|||
int siglen; |
|||
unsigned char pubkey[33]; |
|||
int pubkeylen; |
|||
} benchmark_verify_t; |
|||
|
|||
static void benchmark_verify(void* arg) { |
|||
int i; |
|||
benchmark_verify_t* data = (benchmark_verify_t*)arg; |
|||
|
|||
for (i = 0; i < 20000; i++) { |
|||
secp256k1_pubkey_t pubkey; |
|||
secp256k1_ecdsa_signature_t sig; |
|||
data->sig[data->siglen - 1] ^= (i & 0xFF); |
|||
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); |
|||
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); |
|||
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); |
|||
CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); |
|||
CHECK(secp256k1_ecdsa_verify(data->ctx, data->msg, &sig, &pubkey) == (i == 0)); |
|||
data->sig[data->siglen - 1] ^= (i & 0xFF); |
|||
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); |
|||
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); |
|||
} |
|||
} |
|||
|
|||
int main(void) { |
|||
int i; |
|||
secp256k1_pubkey_t pubkey; |
|||
secp256k1_ecdsa_signature_t sig; |
|||
benchmark_verify_t data; |
|||
|
|||
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); |
|||
|
|||
for (i = 0; i < 32; i++) data.msg[i] = 1 + i; |
|||
for (i = 0; i < 32; i++) data.key[i] = 33 + i; |
|||
data.siglen = 72; |
|||
CHECK(secp256k1_ecdsa_sign(data.ctx, data.msg, &sig, data.key, NULL, NULL)); |
|||
CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); |
|||
CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key)); |
|||
CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, 1) == 1); |
|||
|
|||
run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); |
|||
|
|||
secp256k1_context_destroy(data.ctx); |
|||
return 0; |
|||
} |
@@ -0,0 +1,20 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECDSA_ |
|||
#define _SECP256K1_ECDSA_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
#include "ecmult.h" |
|||
|
|||
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar_t *r, secp256k1_scalar_t *s, const unsigned char *sig, int size); |
|||
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_scalar_t *r, const secp256k1_scalar_t *s); |
|||
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t* r, const secp256k1_scalar_t* s, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message); |
|||
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_scalar_t* r, secp256k1_scalar_t* s, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid); |
|||
static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t* r, const secp256k1_scalar_t* s, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid); |
|||
|
|||
#endif |
@@ -0,0 +1,264 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
|
|||
#ifndef _SECP256K1_ECDSA_IMPL_H_ |
|||
#define _SECP256K1_ECDSA_IMPL_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
#include "ecmult.h" |
|||
#include "ecmult_gen.h" |
|||
#include "ecdsa.h" |
|||
|
|||
/** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1
 *  sage: for t in xrange(1023, -1, -1):
 *  ..   p = 2**256 - 2**32 - t
 *  ..   if p.is_prime():
 *  ..     print '%x'%p
 *  ..     break
 *   'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f'
 *  sage: a = 0
 *  sage: b = 7
 *  sage: F = FiniteField (p)
 *  sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
 *   'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
 */
|||
static const secp256k1_fe_t secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( |
|||
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, |
|||
0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL |
|||
); |
|||
|
|||
/** Difference between field and order, i.e. the values 'p' and 'n' defined in
 *  "Standards for Efficient Cryptography" (SEC2) 2.7.1.
 *  sage: p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
 *  sage: a = 0
 *  sage: b = 7
 *  sage: F = FiniteField (p)
 *  sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
 *   '14551231950b75fc4402da1722fc9baee'
 */
|||
static const secp256k1_fe_t secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( |
|||
0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL |
|||
); |
|||
|
|||
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar_t *rr, secp256k1_scalar_t *rs, const unsigned char *sig, int size) { |
|||
unsigned char ra[32] = {0}, sa[32] = {0}; |
|||
const unsigned char *rp; |
|||
const unsigned char *sp; |
|||
int lenr; |
|||
int lens; |
|||
int overflow; |
|||
if (sig[0] != 0x30) { |
|||
return 0; |
|||
} |
|||
lenr = sig[3]; |
|||
if (5+lenr >= size) { |
|||
return 0; |
|||
} |
|||
lens = sig[lenr+5]; |
|||
if (sig[1] != lenr+lens+4) { |
|||
return 0; |
|||
} |
|||
if (lenr+lens+6 > size) { |
|||
return 0; |
|||
} |
|||
if (sig[2] != 0x02) { |
|||
return 0; |
|||
} |
|||
if (lenr == 0) { |
|||
return 0; |
|||
} |
|||
if (sig[lenr+4] != 0x02) { |
|||
return 0; |
|||
} |
|||
if (lens == 0) { |
|||
return 0; |
|||
} |
|||
sp = sig + 6 + lenr; |
|||
while (lens > 0 && sp[0] == 0) { |
|||
lens--; |
|||
sp++; |
|||
} |
|||
if (lens > 32) { |
|||
return 0; |
|||
} |
|||
rp = sig + 4; |
|||
while (lenr > 0 && rp[0] == 0) { |
|||
lenr--; |
|||
rp++; |
|||
} |
|||
if (lenr > 32) { |
|||
return 0; |
|||
} |
|||
memcpy(ra + 32 - lenr, rp, lenr); |
|||
memcpy(sa + 32 - lens, sp, lens); |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(rr, ra, &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
secp256k1_scalar_set_b32(rs, sa, &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_scalar_t* ar, const secp256k1_scalar_t* as) { |
|||
unsigned char r[33] = {0}, s[33] = {0}; |
|||
unsigned char *rp = r, *sp = s; |
|||
int lenR = 33, lenS = 33; |
|||
secp256k1_scalar_get_b32(&r[1], ar); |
|||
secp256k1_scalar_get_b32(&s[1], as); |
|||
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } |
|||
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } |
|||
if (*size < 6+lenS+lenR) { |
|||
*size = 6 + lenS + lenR; |
|||
return 0; |
|||
} |
|||
*size = 6 + lenS + lenR; |
|||
sig[0] = 0x30; |
|||
sig[1] = 4 + lenS + lenR; |
|||
sig[2] = 0x02; |
|||
sig[3] = lenR; |
|||
memcpy(sig+4, rp, lenR); |
|||
sig[4+lenR] = 0x02; |
|||
sig[5+lenR] = lenS; |
|||
memcpy(sig+lenR+6, sp, lenS); |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t *sigr, const secp256k1_scalar_t *sigs, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message) { |
|||
unsigned char c[32]; |
|||
secp256k1_scalar_t sn, u1, u2; |
|||
secp256k1_fe_t xr; |
|||
secp256k1_gej_t pubkeyj; |
|||
secp256k1_gej_t pr; |
|||
|
|||
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { |
|||
return 0; |
|||
} |
|||
|
|||
secp256k1_scalar_inverse_var(&sn, sigs); |
|||
secp256k1_scalar_mul(&u1, &sn, message); |
|||
secp256k1_scalar_mul(&u2, &sn, sigr); |
|||
secp256k1_gej_set_ge(&pubkeyj, pubkey); |
|||
secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); |
|||
if (secp256k1_gej_is_infinity(&pr)) { |
|||
return 0; |
|||
} |
|||
secp256k1_scalar_get_b32(c, sigr); |
|||
secp256k1_fe_set_b32(&xr, c); |
|||
|
|||
    /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
     *  in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p),
     *  compute the remainder modulo n, and compare it to xr. However:
     *
     *        xr == X(pr) mod n
     *    <=> exists h. (xr + h * n < p && xr + h * n == X(pr))
     *    [Since 2 * n > p, h can only be 0 or 1]
     *    <=> (xr == X(pr)) || (xr + n < p && xr + n == X(pr))
     *    [In Jacobian coordinates, X(pr) is pr.x / pr.z^2 mod p]
     *    <=> (xr == pr.x / pr.z^2 mod p) || (xr + n < p && xr + n == pr.x / pr.z^2 mod p)
     *    [Multiplying both sides of the equations by pr.z^2 mod p]
     *    <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
     *
     *  Thus, we can avoid the inversion, but we have to check both cases separately.
     *  secp256k1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
     */
|||
if (secp256k1_gej_eq_x_var(&xr, &pr)) { |
|||
/* xr.x == xr * xr.z^2 mod p, so the signature is valid. */ |
|||
return 1; |
|||
} |
|||
if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) { |
|||
/* xr + p >= n, so we can skip testing the second case. */ |
|||
return 0; |
|||
} |
|||
secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe); |
|||
if (secp256k1_gej_eq_x_var(&xr, &pr)) { |
|||
/* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */ |
|||
return 1; |
|||
} |
|||
return 0; |
|||
} |
|||
|
|||
static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t *sigr, const secp256k1_scalar_t* sigs, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid) { |
|||
unsigned char brx[32]; |
|||
secp256k1_fe_t fx; |
|||
secp256k1_ge_t x; |
|||
secp256k1_gej_t xj; |
|||
secp256k1_scalar_t rn, u1, u2; |
|||
secp256k1_gej_t qj; |
|||
|
|||
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { |
|||
return 0; |
|||
} |
|||
|
|||
secp256k1_scalar_get_b32(brx, sigr); |
|||
VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */ |
|||
if (recid & 2) { |
|||
if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) { |
|||
return 0; |
|||
} |
|||
secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe); |
|||
} |
|||
if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_set_ge(&xj, &x); |
|||
secp256k1_scalar_inverse_var(&rn, sigr); |
|||
secp256k1_scalar_mul(&u1, &rn, message); |
|||
secp256k1_scalar_negate(&u1, &u1); |
|||
secp256k1_scalar_mul(&u2, &rn, sigs); |
|||
secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1); |
|||
secp256k1_ge_set_gej_var(pubkey, &qj); |
|||
return !secp256k1_gej_is_infinity(&qj); |
|||
} |
|||
|
|||
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_scalar_t *sigr, secp256k1_scalar_t *sigs, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid) { |
|||
unsigned char b[32]; |
|||
secp256k1_gej_t rp; |
|||
secp256k1_ge_t r; |
|||
secp256k1_scalar_t n; |
|||
int overflow = 0; |
|||
|
|||
secp256k1_ecmult_gen(ctx, &rp, nonce); |
|||
secp256k1_ge_set_gej(&r, &rp); |
|||
secp256k1_fe_normalize(&r.x); |
|||
secp256k1_fe_normalize(&r.y); |
|||
secp256k1_fe_get_b32(b, &r.x); |
|||
secp256k1_scalar_set_b32(sigr, b, &overflow); |
|||
if (secp256k1_scalar_is_zero(sigr)) { |
|||
/* P.x = order is on the curve, so technically sig->r could end up zero, which would be an invalid signature. */ |
|||
secp256k1_gej_clear(&rp); |
|||
secp256k1_ge_clear(&r); |
|||
return 0; |
|||
} |
|||
if (recid) { |
|||
*recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0); |
|||
} |
|||
secp256k1_scalar_mul(&n, sigr, seckey); |
|||
secp256k1_scalar_add(&n, &n, message); |
|||
secp256k1_scalar_inverse(sigs, nonce); |
|||
secp256k1_scalar_mul(sigs, sigs, &n); |
|||
secp256k1_scalar_clear(&n); |
|||
secp256k1_gej_clear(&rp); |
|||
secp256k1_ge_clear(&r); |
|||
if (secp256k1_scalar_is_zero(sigs)) { |
|||
return 0; |
|||
} |
|||
if (secp256k1_scalar_is_high(sigs)) { |
|||
secp256k1_scalar_negate(sigs, sigs); |
|||
if (recid) { |
|||
*recid ^= 1; |
|||
} |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
#endif |
@@ -0,0 +1,26 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECKEY_ |
|||
#define _SECP256K1_ECKEY_ |
|||
|
|||
#include "group.h" |
|||
#include "scalar.h" |
|||
#include "ecmult.h" |
|||
#include "ecmult_gen.h" |
|||
|
|||
static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned char *pub, int size); |
|||
static int secp256k1_eckey_pubkey_serialize(secp256k1_ge_t *elem, unsigned char *pub, int *size, int compressed); |
|||
|
|||
static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned char *privkey, int privkeylen); |
|||
static int secp256k1_eckey_privkey_serialize(const secp256k1_ecmult_gen_context_t *ctx, unsigned char *privkey, int *privkeylen, const secp256k1_scalar_t *key, int compressed); |
|||
|
|||
static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak); |
|||
static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context_t *ctx, secp256k1_ge_t *key, const secp256k1_scalar_t *tweak); |
|||
static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak); |
|||
static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context_t *ctx, secp256k1_ge_t *key, const secp256k1_scalar_t *tweak); |
|||
|
|||
#endif |
@@ -0,0 +1,202 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECKEY_IMPL_H_ |
|||
#define _SECP256K1_ECKEY_IMPL_H_ |
|||
|
|||
#include "eckey.h" |
|||
|
|||
#include "scalar.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
#include "ecmult_gen.h" |
|||
|
|||
static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned char *pub, int size) { |
|||
if (size == 33 && (pub[0] == 0x02 || pub[0] == 0x03)) { |
|||
secp256k1_fe_t x; |
|||
return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == 0x03); |
|||
} else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) { |
|||
secp256k1_fe_t x, y; |
|||
if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_xy(elem, &x, &y); |
|||
if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07)) { |
|||
return 0; |
|||
} |
|||
return secp256k1_ge_is_valid_var(elem); |
|||
} else { |
|||
return 0; |
|||
} |
|||
} |
|||
|
|||
static int secp256k1_eckey_pubkey_serialize(secp256k1_ge_t *elem, unsigned char *pub, int *size, int compressed) { |
|||
if (secp256k1_ge_is_infinity(elem)) { |
|||
return 0; |
|||
} |
|||
secp256k1_fe_normalize_var(&elem->x); |
|||
secp256k1_fe_normalize_var(&elem->y); |
|||
secp256k1_fe_get_b32(&pub[1], &elem->x); |
|||
if (compressed) { |
|||
*size = 33; |
|||
pub[0] = 0x02 | (secp256k1_fe_is_odd(&elem->y) ? 0x01 : 0x00); |
|||
} else { |
|||
*size = 65; |
|||
pub[0] = 0x04; |
|||
secp256k1_fe_get_b32(&pub[33], &elem->y); |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned char *privkey, int privkeylen) { |
|||
unsigned char c[32] = {0}; |
|||
const unsigned char *end = privkey + privkeylen; |
|||
int lenb = 0; |
|||
int len = 0; |
|||
int overflow = 0; |
|||
/* sequence header */ |
|||
if (end < privkey+1 || *privkey != 0x30) { |
|||
return 0; |
|||
} |
|||
privkey++; |
|||
/* sequence length constructor */ |
|||
if (end < privkey+1 || !(*privkey & 0x80)) { |
|||
return 0; |
|||
} |
|||
lenb = *privkey & ~0x80; privkey++; |
|||
if (lenb < 1 || lenb > 2) { |
|||
return 0; |
|||
} |
|||
if (end < privkey+lenb) { |
|||
return 0; |
|||
} |
|||
/* sequence length */ |
|||
len = privkey[lenb-1] | (lenb > 1 ? privkey[lenb-2] << 8 : 0); |
|||
privkey += lenb; |
|||
if (end < privkey+len) { |
|||
return 0; |
|||
} |
|||
/* sequence element 0: version number (=1) */ |
|||
if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01) { |
|||
return 0; |
|||
} |
|||
privkey += 3; |
|||
/* sequence element 1: octet string, up to 32 bytes */ |
|||
if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) { |
|||
return 0; |
|||
} |
|||
memcpy(c + 32 - privkey[1], privkey + 2, privkey[1]); |
|||
secp256k1_scalar_set_b32(key, c, &overflow); |
|||
memset(c, 0, 32); |
|||
return !overflow; |
|||
} |
|||
|
|||
static int secp256k1_eckey_privkey_serialize(const secp256k1_ecmult_gen_context_t *ctx, unsigned char *privkey, int *privkeylen, const secp256k1_scalar_t *key, int compressed) { |
|||
secp256k1_gej_t rp; |
|||
secp256k1_ge_t r; |
|||
int pubkeylen = 0; |
|||
secp256k1_ecmult_gen(ctx, &rp, key); |
|||
secp256k1_ge_set_gej(&r, &rp); |
|||
if (compressed) { |
|||
static const unsigned char begin[] = { |
|||
0x30,0x81,0xD3,0x02,0x01,0x01,0x04,0x20 |
|||
}; |
|||
static const unsigned char middle[] = { |
|||
0xA0,0x81,0x85,0x30,0x81,0x82,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48, |
|||
0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04, |
|||
0x21,0x02,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87, |
|||
0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8, |
|||
0x17,0x98,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E, |
|||
0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x24,0x03,0x22,0x00 |
|||
}; |
|||
unsigned char *ptr = privkey; |
|||
memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin); |
|||
secp256k1_scalar_get_b32(ptr, key); ptr += 32; |
|||
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); |
|||
if (!secp256k1_eckey_pubkey_serialize(&r, ptr, &pubkeylen, 1)) { |
|||
return 0; |
|||
} |
|||
ptr += pubkeylen; |
|||
*privkeylen = ptr - privkey; |
|||
} else { |
|||
static const unsigned char begin[] = { |
|||
0x30,0x82,0x01,0x13,0x02,0x01,0x01,0x04,0x20 |
|||
}; |
|||
static const unsigned char middle[] = { |
|||
0xA0,0x81,0xA5,0x30,0x81,0xA2,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48, |
|||
0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04, |
|||
0x41,0x04,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87, |
|||
0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8, |
|||
0x17,0x98,0x48,0x3A,0xDA,0x77,0x26,0xA3,0xC4,0x65,0x5D,0xA4,0xFB,0xFC,0x0E,0x11, |
|||
0x08,0xA8,0xFD,0x17,0xB4,0x48,0xA6,0x85,0x54,0x19,0x9C,0x47,0xD0,0x8F,0xFB,0x10, |
|||
0xD4,0xB8,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E, |
|||
0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x44,0x03,0x42,0x00 |
|||
}; |
|||
unsigned char *ptr = privkey; |
|||
memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin); |
|||
secp256k1_scalar_get_b32(ptr, key); ptr += 32; |
|||
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); |
|||
if (!secp256k1_eckey_pubkey_serialize(&r, ptr, &pubkeylen, 0)) { |
|||
return 0; |
|||
} |
|||
ptr += pubkeylen; |
|||
*privkeylen = ptr - privkey; |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) { |
|||
secp256k1_scalar_add(key, key, tweak); |
|||
if (secp256k1_scalar_is_zero(key)) { |
|||
return 0; |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context_t *ctx, secp256k1_ge_t *key, const secp256k1_scalar_t *tweak) { |
|||
secp256k1_gej_t pt; |
|||
secp256k1_scalar_t one; |
|||
secp256k1_gej_set_ge(&pt, key); |
|||
secp256k1_scalar_set_int(&one, 1); |
|||
secp256k1_ecmult(ctx, &pt, &pt, &one, tweak); |
|||
|
|||
if (secp256k1_gej_is_infinity(&pt)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej(key, &pt); |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) { |
|||
if (secp256k1_scalar_is_zero(tweak)) { |
|||
return 0; |
|||
} |
|||
|
|||
secp256k1_scalar_mul(key, key, tweak); |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context_t *ctx, secp256k1_ge_t *key, const secp256k1_scalar_t *tweak) { |
|||
secp256k1_scalar_t zero; |
|||
secp256k1_gej_t pt; |
|||
if (secp256k1_scalar_is_zero(tweak)) { |
|||
return 0; |
|||
} |
|||
|
|||
secp256k1_scalar_set_int(&zero, 0); |
|||
secp256k1_gej_set_ge(&pt, key); |
|||
secp256k1_ecmult(ctx, &pt, &pt, tweak, &zero); |
|||
secp256k1_ge_set_gej(key, &pt); |
|||
return 1; |
|||
} |
|||
|
|||
#endif |
@@ -0,0 +1,31 @@
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECMULT_ |
|||
#define _SECP256K1_ECMULT_ |
|||
|
|||
#include "num.h" |
|||
#include "group.h" |
|||
|
|||
typedef struct { |
|||
/* For accelerating the computation of a*P + b*G: */ |
|||
secp256k1_ge_storage_t (*pre_g)[]; /* odd multiples of the generator */ |
|||
#ifdef USE_ENDOMORPHISM |
|||
secp256k1_ge_storage_t (*pre_g_128)[]; /* odd multiples of 2^128*generator */ |
|||
#endif |
|||
} secp256k1_ecmult_context_t; |
|||
|
|||
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context_t *ctx); |
|||
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context_t *ctx, const callback_t *cb); |
|||
static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context_t *dst, |
|||
const secp256k1_ecmult_context_t *src, const callback_t *cb); |
|||
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context_t *ctx); |
|||
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context_t *ctx); |
|||
|
|||
/** Double multiply: R = na*A + ng*G */ |
|||
static void secp256k1_ecmult(const secp256k1_ecmult_context_t *ctx, secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng); |
|||
|
|||
#endif |
@@ -0,0 +1,15 @@
/**********************************************************************
 * Copyright (c) 2015 Andrew Poelstra                                 *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECMULT_CONST_ |
|||
#define _SECP256K1_ECMULT_CONST_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
|
|||
static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a, const secp256k1_scalar_t *q); |
|||
|
|||
#endif |
@@ -0,0 +1,258 @@
/**********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
|
|||
#ifndef _SECP256K1_ECMULT_CONST_IMPL_ |
|||
#define _SECP256K1_ECMULT_CONST_IMPL_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
#include "ecmult_const.h" |
|||
#include "ecmult_impl.h" |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
#define WNAF_BITS 128 |
|||
#else |
|||
#define WNAF_BITS 256 |
|||
#endif |
|||
#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w)) |
|||
|
|||
/* This is like `ECMULT_TABLE_GET_GE` but is constant time */ |
|||
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \ |
|||
int m; \ |
|||
int abs_n = (n) * (((n) > 0) * 2 - 1); \ |
|||
int idx_n = abs_n / 2; \ |
|||
secp256k1_fe_t neg_y; \ |
|||
VERIFY_CHECK(((n) & 1) == 1); \ |
|||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ |
|||
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ |
|||
VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \ |
|||
VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \ |
|||
for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \ |
|||
/* This loop is used to avoid secret data in array indices. See
|
|||
* the comment in ecmult_gen_impl.h for rationale. */ \ |
|||
secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ |
|||
secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ |
|||
} \ |
|||
(r)->infinity = 0; \ |
|||
secp256k1_fe_negate(&neg_y, &(r)->y, 1); \ |
|||
secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ |
|||
} while(0) |
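To illustrate the scan-and-cmov pattern the macro above relies on, here is a minimal standalone sketch (hypothetical helper names, not libsecp256k1 code): every table slot is visited and a branch-free mask selects the wanted entry, so the memory access pattern does not depend on the secret index. A hardened implementation would also replace the boolean comparison below with pure bit arithmetic.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical demo: read table[idx] while touching every entry exactly once,
 * selecting the target with a branch-free conditional move (mask is all-ones
 * only when i == idx). */
static uint32_t ct_table_lookup(const uint32_t *table, size_t len, size_t idx) {
    uint32_t r = 0;
    size_t i;
    for (i = 0; i < len; i++) {
        uint32_t mask = (uint32_t)0 - (uint32_t)(i == idx);
        r = (r & ~mask) | (table[i] & mask);
    }
    return r;
}

int main(void) {
    uint32_t t[8] = {10, 11, 12, 13, 14, 15, 16, 17};
    printf("%u\n", ct_table_lookup(t, 8, 5)); /* prints 15 */
    return 0;
}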
|||
|
|||
|
|||
/** Convert a number to WNAF notation. The number becomes represented by sum(2^{wi} * wnaf[i], i=0..return_val)
|
|||
* with the following guarantees: |
|||
 * - each wnaf[i] is an odd integer between -(1 << w) and (1 << w) |
|||
* - each wnaf[i] is nonzero |
|||
* - the number of words set is returned; this is always (WNAF_BITS + w - 1) / w |
|||
* |
|||
* Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar |
|||
 * Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.) |
|||
 * CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlag Berlin Heidelberg 2003 |
|||
* |
|||
 * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335 |
|||
*/ |
|||
static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar_t s, int w) { |
|||
int global_sign = 1; |
|||
int skew = 0; |
|||
int word = 0; |
|||
/* 1 2 3 */ |
|||
int u_last; |
|||
int u; |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
/* If we are using the endomorphism, we cannot handle even numbers by negating
|
|||
* them, since we are working with 128-bit numbers whose negations would be 256 |
|||
* bits, eliminating the performance advantage. Instead we use a technique from |
|||
 * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even) |
|||
* or 2 (for odd) to the number we are encoding, then compensating after the |
|||
* multiplication. */ |
|||
/* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */ |
|||
int flip = secp256k1_scalar_is_high(&s); |
|||
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ |
|||
int bit = flip ^ (s.d[0] & 1); |
|||
/* We check for negative one, since adding 2 to it will cause an overflow */ |
|||
secp256k1_scalar_t neg_s; |
|||
int not_neg_one; |
|||
secp256k1_scalar_negate(&neg_s, &s); |
|||
not_neg_one = !secp256k1_scalar_is_one(&neg_s); |
|||
secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); |
|||
/* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
|
|||
* that we added two to it and flipped it. In fact for -1 these operations are |
|||
* identical. We only flipped, but since skewing is required (in the sense that |
|||
* the skew must be 1 or 2, never zero) and flipping is not, we need to change |
|||
* our flags to claim that we only skewed. */ |
|||
global_sign = secp256k1_scalar_cond_negate(&s, flip); |
|||
global_sign *= not_neg_one * 2 - 1; |
|||
skew = 1 << bit; |
|||
#else |
|||
/* Otherwise, we just negate to force oddness */ |
|||
int is_even = secp256k1_scalar_is_even(&s); |
|||
global_sign = secp256k1_scalar_cond_negate(&s, is_even); |
|||
#endif |
|||
|
|||
/* 4 */ |
|||
u_last = secp256k1_scalar_shr_int(&s, w); |
|||
while (word * w < WNAF_BITS) { |
|||
int sign; |
|||
int even; |
|||
|
|||
/* 4.1 4.4 */ |
|||
u = secp256k1_scalar_shr_int(&s, w); |
|||
/* 4.2 */ |
|||
even = ((u & 1) == 0); |
|||
sign = 2 * (u_last > 0) - 1; |
|||
u += sign * even; |
|||
u_last -= sign * even * (1 << w); |
|||
|
|||
/* 4.3, adapted for global sign change */ |
|||
wnaf[word++] = u_last * global_sign; |
|||
|
|||
u_last = u; |
|||
} |
|||
wnaf[word] = u * global_sign; |
|||
|
|||
VERIFY_CHECK(secp256k1_scalar_is_zero(&s)); |
|||
VERIFY_CHECK(word == WNAF_SIZE(w)); |
|||
return skew; |
|||
} |
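As a concrete illustration of the odd fixed-window recoding documented above, the following standalone sketch (demo names, not library code) applies the same borrow trick to a small 16-bit value and checks that the odd digits reconstruct the input; the scalar-width and skew handling of the real function are omitted because the demo value is small and odd.

#include <assert.h>
#include <stdio.h>

/* Hypothetical demo: recode an odd value into ndigits odd signed digits in
 * (-(1<<w), 1<<w) such that n == sum(d[i] * 2^(w*i)). */
static void demo_recode_odd(unsigned n, int w, int *d, int ndigits) {
    int u_last = (int)(n & ((1u << w) - 1)); /* odd, because n is odd */
    int i;
    n >>= w;
    for (i = 0; i < ndigits - 1; i++) {
        int u = (int)(n & ((1u << w) - 1));
        n >>= w;
        if ((u & 1) == 0) {                  /* next digit would be even: borrow */
            int sign = (u_last > 0) ? 1 : -1;
            u += sign;                       /* make the next digit odd */
            u_last -= sign * (1 << w);       /* compensate, value unchanged */
        }
        d[i] = u_last;
        u_last = u;
    }
    d[ndigits - 1] = u_last;
}

int main(void) {
    int d[5], i;
    long back = 0;
    demo_recode_odd(0xB6E5u, 4, d, 5);       /* 5 digits cover 16 bits plus the borrow */
    for (i = 4; i >= 0; i--) {
        back = back * 16 + d[i];             /* Horner evaluation of sum(d[i]*16^i) */
    }
    printf("%ld\n", back);                   /* prints 46821 == 0xB6E5 */
    assert(back == 0xB6E5L);
    return 0;
}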
|||
|
|||
|
|||
static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a, const secp256k1_scalar_t *scalar) { |
|||
secp256k1_ge_t pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
secp256k1_ge_t tmpa; |
|||
secp256k1_fe_t Z; |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
secp256k1_ge_t pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; |
|||
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; |
|||
int skew_1; |
|||
int skew_lam; |
|||
secp256k1_scalar_t q_1, q_lam; |
|||
#else |
|||
int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)]; |
|||
#endif |
|||
|
|||
int i; |
|||
secp256k1_scalar_t sc = *scalar; |
|||
|
|||
/* build wnaf representation for q. */ |
|||
#ifdef USE_ENDOMORPHISM |
|||
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ |
|||
secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc); |
|||
/* no need for zero correction when using endomorphism since even
|
|||
* numbers have one added to them anyway */ |
|||
skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1); |
|||
skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1); |
|||
#else |
|||
int is_zero = secp256k1_scalar_is_zero(scalar); |
|||
/* the wNAF ladder cannot handle zero, so bump this to one .. we will
|
|||
* correct the result after the fact */ |
|||
sc.d[0] += is_zero; |
|||
VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc)); |
|||
|
|||
secp256k1_wnaf_const(wnaf, sc, WINDOW_A - 1); |
|||
#endif |
|||
|
|||
/* Calculate odd multiples of a.
|
|||
* All multiples are brought to the same Z 'denominator', which is stored |
|||
 * in Z. Due to secp256k1's isomorphism we can do all operations pretending |
|||
* that the Z coordinate was 1, use affine addition formulae, and correct |
|||
* the Z coordinate of the result once at the end. |
|||
*/ |
|||
secp256k1_gej_set_ge(r, a); |
|||
secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); |
|||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { |
|||
secp256k1_fe_normalize_weak(&pre_a[i].y); |
|||
} |
|||
#ifdef USE_ENDOMORPHISM |
|||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { |
|||
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); |
|||
} |
|||
#endif |
|||
|
|||
/* first loop iteration (separated out so we can directly set r, rather
|
|||
* than having it start at infinity, get doubled several times, then have |
|||
* its new value added to it) */ |
|||
#ifdef USE_ENDOMORPHISM |
|||
i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)]; |
|||
VERIFY_CHECK(i != 0); |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); |
|||
secp256k1_gej_set_ge(r, &tmpa); |
|||
|
|||
i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)]; |
|||
VERIFY_CHECK(i != 0); |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); |
|||
secp256k1_gej_add_ge(r, r, &tmpa); |
|||
#else |
|||
i = wnaf[WNAF_SIZE(WINDOW_A - 1)]; |
|||
VERIFY_CHECK(i != 0); |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); |
|||
secp256k1_gej_set_ge(r, &tmpa); |
|||
#endif |
|||
/* remaining loop iterations */ |
|||
for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) { |
|||
int n; |
|||
int j; |
|||
for (j = 0; j < WINDOW_A - 1; ++j) { |
|||
secp256k1_gej_double_nonzero(r, r, NULL); |
|||
} |
|||
#ifdef USE_ENDOMORPHISM |
|||
n = wnaf_1[i]; |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); |
|||
VERIFY_CHECK(n != 0); |
|||
secp256k1_gej_add_ge(r, r, &tmpa); |
|||
|
|||
n = wnaf_lam[i]; |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); |
|||
VERIFY_CHECK(n != 0); |
|||
secp256k1_gej_add_ge(r, r, &tmpa); |
|||
#else |
|||
n = wnaf[i]; |
|||
VERIFY_CHECK(n != 0); |
|||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); |
|||
secp256k1_gej_add_ge(r, r, &tmpa); |
|||
#endif |
|||
} |
|||
|
|||
secp256k1_fe_mul(&r->z, &r->z, &Z); |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
{ |
|||
/* Correct for wNAF skew */ |
|||
secp256k1_ge_t correction = *a; |
|||
secp256k1_ge_storage_t correction_1_stor; |
|||
secp256k1_ge_storage_t correction_lam_stor; |
|||
secp256k1_ge_storage_t a2_stor; |
|||
secp256k1_gej_t tmpj; |
|||
secp256k1_gej_set_ge(&tmpj, &correction); |
|||
secp256k1_gej_double_var(&tmpj, &tmpj, NULL); |
|||
secp256k1_ge_set_gej(&correction, &tmpj); |
|||
secp256k1_ge_to_storage(&correction_1_stor, a); |
|||
secp256k1_ge_to_storage(&correction_lam_stor, a); |
|||
secp256k1_ge_to_storage(&a2_stor, &correction); |
|||
|
|||
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ |
|||
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); |
|||
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); |
|||
|
|||
/* Apply the correction */ |
|||
secp256k1_ge_from_storage(&correction, &correction_1_stor); |
|||
secp256k1_ge_neg(&correction, &correction); |
|||
secp256k1_gej_add_ge(r, r, &correction); |
|||
|
|||
secp256k1_ge_from_storage(&correction, &correction_lam_stor); |
|||
secp256k1_ge_neg(&correction, &correction); |
|||
secp256k1_ge_mul_lambda(&correction, &correction); |
|||
secp256k1_gej_add_ge(r, r, &correction); |
|||
} |
|||
#else |
|||
/* correct for zero */ |
|||
r->infinity |= is_zero; |
|||
#endif |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,43 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_ECMULT_GEN_ |
|||
#define _SECP256K1_ECMULT_GEN_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
|
|||
typedef struct { |
|||
/* For accelerating the computation of a*G:
|
|||
* To harden against timing attacks, use the following mechanism: |
|||
* * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63. |
|||
* * Compute sum(n_i * 16^i * G + U_i, i=0..63), where: |
|||
* * U_i = U * 2^i (for i=0..62) |
|||
* * U_i = U * (1-2^63) (for i=63) |
|||
* where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0. |
|||
* For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is |
|||
* precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63). |
|||
* None of the resulting prec group elements have a known scalar, and neither do any of |
|||
* the intermediate sums while computing a*G. |
|||
*/ |
|||
secp256k1_ge_storage_t (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */ |
|||
secp256k1_scalar_t blind; |
|||
secp256k1_gej_t initial; |
|||
} secp256k1_ecmult_gen_context_t; |
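The identity behind the table layout described in the comment above can be checked with plain integer arithmetic. The sketch below is hypothetical demo code, not EC code: g stands in for the generator, the u[i] for the U_i offsets, and only 8 nibbles of a 32-bit scalar are used so everything fits in 64-bit arithmetic. It verifies that sum(n_i * 16^i * g + u_i) equals a*g once the offsets cancel.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t a = 0x9abcdef1u;   /* stand-in scalar, 8 nibbles */
    uint64_t g = 7;             /* stand-in for G */
    int64_t u[8];
    int64_t sum_u = 0;
    uint64_t acc = 0;
    int i;
    for (i = 0; i < 7; i++) {   /* arbitrary offsets ... */
        u[i] = (i + 1) * 1000;
        sum_u += u[i];
    }
    u[7] = -sum_u;              /* ... chosen so they cancel, like the U_i */
    for (i = 0; i < 8; i++) {
        uint64_t n_i = (a >> (4 * i)) & 0xF;                 /* i-th nibble */
        acc += n_i * ((uint64_t)1 << (4 * i)) * g + (uint64_t)u[i];
    }
    assert(acc == a * g);       /* the offsets drop out of the sum */
    printf("ok\n");
    return 0;
}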
|||
|
|||
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context_t* ctx); |
|||
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context_t* ctx, const callback_t* cb); |
|||
static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context_t *dst, |
|||
const secp256k1_ecmult_gen_context_t* src, const callback_t* cb); |
|||
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context_t* ctx); |
|||
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context_t* ctx); |
|||
|
|||
/** Multiply with the generator: R = a*G */ |
|||
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context_t* ctx, secp256k1_gej_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context_t *ctx, const unsigned char *seed32); |
|||
|
|||
#endif |
@ -0,0 +1,205 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_ECMULT_GEN_IMPL_H_ |
|||
#define _SECP256K1_ECMULT_GEN_IMPL_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
#include "ecmult_gen.h" |
|||
#include "hash_impl.h" |
|||
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION |
|||
#include "ecmult_static_context.h" |
|||
#endif |
|||
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context_t *ctx) { |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context_t *ctx, const callback_t* cb) { |
|||
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION |
|||
secp256k1_ge_t prec[1024]; |
|||
secp256k1_gej_t gj; |
|||
secp256k1_gej_t nums_gej; |
|||
int i, j; |
|||
#endif |
|||
|
|||
if (ctx->prec != NULL) { |
|||
return; |
|||
} |
|||
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION |
|||
ctx->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec)); |
|||
|
|||
/* get the generator */ |
|||
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); |
|||
|
|||
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ |
|||
{ |
|||
static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; |
|||
secp256k1_fe_t nums_x; |
|||
secp256k1_ge_t nums_ge; |
|||
VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32)); |
|||
VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0)); |
|||
secp256k1_gej_set_ge(&nums_gej, &nums_ge); |
|||
/* Add G to make the bits in x uniformly distributed. */ |
|||
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL); |
|||
} |
|||
|
|||
/* compute prec. */ |
|||
{ |
|||
secp256k1_gej_t precj[1024]; /* Jacobian versions of prec. */ |
|||
secp256k1_gej_t gbase; |
|||
secp256k1_gej_t numsbase; |
|||
gbase = gj; /* 16^j * G */ |
|||
numsbase = nums_gej; /* 2^j * nums. */ |
|||
for (j = 0; j < 64; j++) { |
|||
/* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */ |
|||
precj[j*16] = numsbase; |
|||
for (i = 1; i < 16; i++) { |
|||
secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); |
|||
} |
|||
/* Multiply gbase by 16. */ |
|||
for (i = 0; i < 4; i++) { |
|||
secp256k1_gej_double_var(&gbase, &gbase, NULL); |
|||
} |
|||
/* Multiply numsbase by 2. */ |
|||
secp256k1_gej_double_var(&numsbase, &numsbase, NULL); |
|||
if (j == 62) { |
|||
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ |
|||
secp256k1_gej_neg(&numsbase, &numsbase); |
|||
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); |
|||
} |
|||
} |
|||
secp256k1_ge_set_all_gej_var(1024, prec, precj, cb); |
|||
} |
|||
for (j = 0; j < 64; j++) { |
|||
for (i = 0; i < 16; i++) { |
|||
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); |
|||
} |
|||
} |
|||
#else |
|||
(void)cb; |
|||
ctx->prec = (secp256k1_ge_storage_t (*)[64][16])secp256k1_ecmult_static_context; |
|||
#endif |
|||
secp256k1_ecmult_gen_blind(ctx, NULL); |
|||
} |
|||
|
|||
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context_t* ctx) { |
|||
return ctx->prec != NULL; |
|||
} |
|||
|
|||
static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context_t *dst, |
|||
const secp256k1_ecmult_gen_context_t *src, const callback_t* cb) { |
|||
if (src->prec == NULL) { |
|||
dst->prec = NULL; |
|||
} else { |
|||
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION |
|||
dst->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(cb, sizeof(*dst->prec)); |
|||
memcpy(dst->prec, src->prec, sizeof(*dst->prec)); |
|||
#else |
|||
(void)cb; |
|||
dst->prec = src->prec; |
|||
#endif |
|||
dst->initial = src->initial; |
|||
dst->blind = src->blind; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context_t *ctx) { |
|||
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION |
|||
free(ctx->prec); |
|||
#endif |
|||
secp256k1_scalar_clear(&ctx->blind); |
|||
secp256k1_gej_clear(&ctx->initial); |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_gej_t *r, const secp256k1_scalar_t *gn) { |
|||
secp256k1_ge_t add; |
|||
secp256k1_ge_storage_t adds; |
|||
secp256k1_scalar_t gnb; |
|||
int bits; |
|||
int i, j; |
|||
memset(&adds, 0, sizeof(adds)); |
|||
*r = ctx->initial; |
|||
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */ |
|||
secp256k1_scalar_add(&gnb, gn, &ctx->blind); |
|||
add.infinity = 0; |
|||
for (j = 0; j < 64; j++) { |
|||
bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4); |
|||
for (i = 0; i < 16; i++) { |
|||
/** This uses a conditional move to avoid any secret data in array indexes.
|
|||
* _Any_ use of secret indexes has been demonstrated to result in timing |
|||
* sidechannels, even when the cache-line access patterns are uniform. |
|||
* See also: |
|||
* "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe |
|||
* (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
|
|||
* "Cache Attacks and Countermeasures: the Case of AES", RSA 2006, |
|||
* by Dag Arne Osvik, Adi Shamir, and Eran Tromer |
|||
* (http://www.tau.ac.il/~tromer/papers/cache.pdf)
|
|||
*/ |
|||
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); |
|||
} |
|||
secp256k1_ge_from_storage(&add, &adds); |
|||
secp256k1_gej_add_ge(r, r, &add); |
|||
} |
|||
bits = 0; |
|||
secp256k1_ge_clear(&add); |
|||
secp256k1_scalar_clear(&gnb); |
|||
} |
|||
|
|||
/* Setup blinding values for secp256k1_ecmult_gen. */ |
|||
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context_t *ctx, const unsigned char *seed32) { |
|||
secp256k1_scalar_t b; |
|||
secp256k1_gej_t gb; |
|||
secp256k1_fe_t s; |
|||
unsigned char nonce32[32]; |
|||
secp256k1_rfc6979_hmac_sha256_t rng; |
|||
int retry; |
|||
unsigned char keydata[64] = {0}; |
|||
if (!seed32) { |
|||
/* When seed is NULL, reset the initial point and blinding value. */ |
|||
secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g); |
|||
secp256k1_gej_neg(&ctx->initial, &ctx->initial); |
|||
secp256k1_scalar_set_int(&ctx->blind, 1); |
|||
} |
|||
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */ |
|||
secp256k1_scalar_get_b32(nonce32, &ctx->blind); |
|||
/** Using a CSPRNG allows a failure-free interface, avoids needing large amounts of random data,
|
|||
* and guards against weak or adversarial seeds. This is a simpler and safer interface than |
|||
* asking the caller for blinding values directly and expecting them to retry on failure. |
|||
*/ |
|||
memcpy(keydata, nonce32, 32); |
|||
if (seed32) { |
|||
memcpy(keydata + 32, seed32, 32); |
|||
} |
|||
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); |
|||
memset(keydata, 0, sizeof(keydata)); |
|||
/* Retry for out of range results to achieve uniformity. */ |
|||
do { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); |
|||
retry = !secp256k1_fe_set_b32(&s, nonce32); |
|||
retry |= secp256k1_fe_is_zero(&s); |
|||
} while (retry); |
|||
/* Randomize the projection to defend against multiplier sidechannels. */ |
|||
secp256k1_gej_rescale(&ctx->initial, &s); |
|||
secp256k1_fe_clear(&s); |
|||
do { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); |
|||
secp256k1_scalar_set_b32(&b, nonce32, &retry); |
|||
/* A blinding value of 0 works, but would undermine the projection hardening. */ |
|||
retry |= secp256k1_scalar_is_zero(&b); |
|||
} while (retry); |
|||
secp256k1_rfc6979_hmac_sha256_finalize(&rng); |
|||
memset(nonce32, 0, 32); |
|||
secp256k1_ecmult_gen(ctx, &gb, &b); |
|||
secp256k1_scalar_negate(&b, &b); |
|||
ctx->blind = b; |
|||
ctx->initial = gb; |
|||
secp256k1_scalar_clear(&b); |
|||
secp256k1_gej_clear(&gb); |
|||
} |
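The blinding identity that secp256k1_ecmult_gen and secp256k1_ecmult_gen_blind rely on, namely (n - b)*G + b*G = n*G with blind = -b and initial = b*G, can be sketched with ordinary modular integer arithmetic. This is a hypothetical demo, with g standing in for the generator and arithmetic taken modulo 2^64 instead of the group order.

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint64_t g = 123456789u;               /* stand-in for the generator */
    uint64_t n = 987654321u;               /* the secret scalar */
    uint64_t b = 555555555u;               /* random blinding value */
    uint64_t blind = (uint64_t)0 - b;      /* ctx->blind   = -b  (mod 2^64) */
    uint64_t initial = b * g;              /* ctx->initial = b*G (mod 2^64) */
    uint64_t r = initial + (n + blind) * g;
    assert(r == n * g);                    /* same result, but the raw scalar
                                              never meets the unblinded table */
    return 0;
}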
|||
|
|||
#endif |
@ -0,0 +1,389 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_ECMULT_IMPL_H_ |
|||
#define _SECP256K1_ECMULT_IMPL_H_ |
|||
|
|||
#include "group.h" |
|||
#include "scalar.h" |
|||
#include "ecmult.h" |
|||
|
|||
/* optimal for 128-bit and 256-bit exponents. */ |
|||
#define WINDOW_A 5 |
|||
|
|||
/** larger numbers may result in slightly better performance, at the cost of
|
|||
exponentially larger precomputed tables. */ |
|||
#ifdef USE_ENDOMORPHISM |
|||
/** Two tables for window size 15: 1.375 MiB. */ |
|||
#define WINDOW_G 15 |
|||
#else |
|||
/** One table for window size 16: 1.375 MiB. */ |
|||
#define WINDOW_G 16 |
|||
#endif |
|||
|
|||
/** The number of entries a table with precomputed multiples needs to have. */ |
|||
#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) |
|||
|
|||
/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
|
|||
 * the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will |
|||
* contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. |
|||
* Prej's Z values are undefined, except for the last value. |
|||
*/ |
|||
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej_t *prej, secp256k1_fe_t *zr, const secp256k1_gej_t *a) { |
|||
secp256k1_gej_t d; |
|||
secp256k1_ge_t a_ge, d_ge; |
|||
int i; |
|||
|
|||
VERIFY_CHECK(!a->infinity); |
|||
|
|||
secp256k1_gej_double_var(&d, a, NULL); |
|||
|
|||
/*
|
|||
* Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate |
|||
* of 'd', and scale the 1P starting value's x/y coordinates without changing its z. |
|||
*/ |
|||
d_ge.x = d.x; |
|||
d_ge.y = d.y; |
|||
d_ge.infinity = 0; |
|||
|
|||
secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z); |
|||
prej[0].x = a_ge.x; |
|||
prej[0].y = a_ge.y; |
|||
prej[0].z = a->z; |
|||
prej[0].infinity = 0; |
|||
|
|||
zr[0] = d.z; |
|||
for (i = 1; i < n; i++) { |
|||
secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); |
|||
} |
|||
|
|||
/*
|
|||
* Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only |
|||
* the final point's z coordinate is actually used though, so just update that. |
|||
*/ |
|||
secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); |
|||
} |
|||
|
|||
/** Fill a table 'pre' with precomputed odd multiples of a.
|
|||
* |
|||
* There are two versions of this function: |
|||
* - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its |
|||
* resulting point set to a single constant Z denominator, stores the X and Y |
|||
* coordinates as ge_storage points in pre, and stores the global Z in rz. |
|||
* It only operates on tables sized for WINDOW_A wnaf multiples. |
|||
* - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its |
|||
* resulting point set to actually affine points, and stores those in pre. |
|||
* It operates on tables of any size, but uses heap-allocated temporaries. |
|||
* |
|||
* To compute a*P + b*G, we compute a table for P using the first function, |
|||
* and for G using the second (which requires an inverse, but it only needs to |
|||
* happen once). |
|||
*/ |
|||
static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge_t *pre, secp256k1_fe_t *globalz, const secp256k1_gej_t *a) { |
|||
secp256k1_gej_t prej[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
secp256k1_fe_t zr[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
|
|||
/* Compute the odd multiples in Jacobian form. */ |
|||
secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); |
|||
/* Bring them to the same Z denominator. */ |
|||
secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); |
|||
} |
|||
|
|||
static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge_storage_t *pre, const secp256k1_gej_t *a, const callback_t *cb) { |
|||
secp256k1_gej_t *prej = (secp256k1_gej_t*)checked_malloc(cb, sizeof(secp256k1_gej_t) * n); |
|||
secp256k1_ge_t *prea = (secp256k1_ge_t*)checked_malloc(cb, sizeof(secp256k1_ge_t) * n); |
|||
secp256k1_fe_t *zr = (secp256k1_fe_t*)checked_malloc(cb, sizeof(secp256k1_fe_t) * n); |
|||
int i; |
|||
|
|||
/* Compute the odd multiples in Jacobian form. */ |
|||
secp256k1_ecmult_odd_multiples_table(n, prej, zr, a); |
|||
/* Convert them in batch to affine coordinates. */ |
|||
secp256k1_ge_set_table_gej_var(n, prea, prej, zr); |
|||
/* Convert them to compact storage form. */ |
|||
for (i = 0; i < n; i++) { |
|||
secp256k1_ge_to_storage(&pre[i], &prea[i]); |
|||
} |
|||
|
|||
free(prea); |
|||
free(prej); |
|||
free(zr); |
|||
} |
|||
|
|||
/** The following two macros retrieve a particular odd multiple from a table
|
|||
* of precomputed multiples. */ |
|||
#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \ |
|||
VERIFY_CHECK(((n) & 1) == 1); \ |
|||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ |
|||
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ |
|||
if ((n) > 0) { \ |
|||
*(r) = (pre)[((n)-1)/2]; \ |
|||
} else { \ |
|||
secp256k1_ge_neg((r), &(pre)[(-(n)-1)/2]); \ |
|||
} \ |
|||
} while(0) |
|||
|
|||
#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \ |
|||
VERIFY_CHECK(((n) & 1) == 1); \ |
|||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ |
|||
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ |
|||
if ((n) > 0) { \ |
|||
secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ |
|||
} else { \ |
|||
secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ |
|||
secp256k1_ge_neg((r), (r)); \ |
|||
} \ |
|||
} while(0) |
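A quick standalone check of the indexing convention used by the two macros above (hypothetical demo code): an odd digit n selects entry (|n|-1)/2 of a table holding the odd multiples 1*A, 3*A, ..., and the result is negated when n is negative. With A = 1 the lookup must reproduce n itself.

#include <assert.h>
#include <stdio.h>

int main(void) {
    int table[8]; /* stand-in for ECMULT_TABLE_SIZE(5) = 8 odd multiples, with A = 1 */
    int n, k;
    for (k = 0; k < 8; k++) {
        table[k] = 2 * k + 1;              /* table[k] holds (2k+1)*A */
    }
    for (n = -15; n <= 15; n += 2) {       /* every valid odd digit for w = 5 */
        int idx = ((n > 0 ? n : -n) - 1) / 2;
        int got = (n > 0) ? table[idx] : -table[idx];
        assert(got == n);                  /* the lookup reproduces n*A */
    }
    printf("ok\n");
    return 0;
}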
|||
|
|||
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context_t *ctx) { |
|||
ctx->pre_g = NULL; |
|||
#ifdef USE_ENDOMORPHISM |
|||
ctx->pre_g_128 = NULL; |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context_t *ctx, const callback_t *cb) { |
|||
secp256k1_gej_t gj; |
|||
|
|||
if (ctx->pre_g != NULL) { |
|||
return; |
|||
} |
|||
|
|||
/* get the generator */ |
|||
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); |
|||
|
|||
ctx->pre_g = (secp256k1_ge_storage_t (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); |
|||
|
|||
/* precompute the tables with odd multiples */ |
|||
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj, cb); |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
{ |
|||
secp256k1_gej_t g_128j; |
|||
int i; |
|||
|
|||
ctx->pre_g_128 = (secp256k1_ge_storage_t (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); |
|||
|
|||
/* calculate 2^128*generator */ |
|||
g_128j = gj; |
|||
for (i = 0; i < 128; i++) { |
|||
secp256k1_gej_double_var(&g_128j, &g_128j, NULL); |
|||
} |
|||
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j, cb); |
|||
} |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context_t *dst, |
|||
const secp256k1_ecmult_context_t *src, const callback_t *cb) { |
|||
if (src->pre_g == NULL) { |
|||
dst->pre_g = NULL; |
|||
} else { |
|||
size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); |
|||
dst->pre_g = (secp256k1_ge_storage_t (*)[])checked_malloc(cb, size); |
|||
memcpy(dst->pre_g, src->pre_g, size); |
|||
} |
|||
#ifdef USE_ENDOMORPHISM |
|||
if (src->pre_g_128 == NULL) { |
|||
dst->pre_g_128 = NULL; |
|||
} else { |
|||
size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); |
|||
dst->pre_g_128 = (secp256k1_ge_storage_t (*)[])checked_malloc(cb, size); |
|||
memcpy(dst->pre_g_128, src->pre_g_128, size); |
|||
} |
|||
#endif |
|||
} |
|||
|
|||
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context_t *ctx) { |
|||
return ctx->pre_g != NULL; |
|||
} |
|||
|
|||
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context_t *ctx) { |
|||
free(ctx->pre_g); |
|||
#ifdef USE_ENDOMORPHISM |
|||
free(ctx->pre_g_128); |
|||
#endif |
|||
secp256k1_ecmult_context_init(ctx); |
|||
} |
|||
|
|||
/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
|
|||
* with the following guarantees: |
|||
* - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1) |
|||
* - two non-zero entries in wnaf are separated by at least w-1 zeroes. |
|||
* - the number of set values in wnaf is returned. This number is at most 256, and at most one more |
|||
* than the number of bits in the (absolute value) of the input. |
|||
*/ |
|||
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar_t *a, int w) { |
|||
secp256k1_scalar_t s = *a; |
|||
int last_set_bit = -1; |
|||
int bit = 0; |
|||
int sign = 1; |
|||
int carry = 0; |
|||
|
|||
VERIFY_CHECK(wnaf != NULL); |
|||
VERIFY_CHECK(0 <= len && len <= 256); |
|||
VERIFY_CHECK(a != NULL); |
|||
VERIFY_CHECK(2 <= w && w <= 31); |
|||
|
|||
memset(wnaf, 0, len * sizeof(wnaf[0])); |
|||
|
|||
if (secp256k1_scalar_get_bits(&s, 255, 1)) { |
|||
secp256k1_scalar_negate(&s, &s); |
|||
sign = -1; |
|||
} |
|||
|
|||
while (bit < len) { |
|||
int now; |
|||
int word; |
|||
if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { |
|||
bit++; |
|||
continue; |
|||
} |
|||
|
|||
now = w; |
|||
if (now > len - bit) { |
|||
now = len - bit; |
|||
} |
|||
|
|||
word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry; |
|||
|
|||
carry = (word >> (w-1)) & 1; |
|||
word -= carry << w; |
|||
|
|||
wnaf[bit] = sign * word; |
|||
last_set_bit = bit; |
|||
|
|||
bit += now; |
|||
} |
|||
#ifdef VERIFY |
|||
CHECK(carry == 0); |
|||
while (bit < 256) { |
|||
CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); |
|||
} |
|||
#endif |
|||
return last_set_bit + 1; |
|||
} |
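For a concrete feel of this recoding, the sketch below (standalone demo, not library code) runs the same carry-based loop on a 16-bit value and checks that the sparse digits reconstruct the input; the scalar-width and sign handling of the real function are omitted because the demo value is small and positive.

#include <assert.h>
#include <stdio.h>

/* Hypothetical demo of the carry-based wNAF recoding above, for small values. */
static int demo_wnaf(int *wnaf, int len, unsigned a, int w) {
    int bit = 0, carry = 0, last = -1, i;
    for (i = 0; i < len; i++) wnaf[i] = 0;
    while (bit < len) {
        int word;
        if (((a >> bit) & 1u) == (unsigned)carry) {
            bit++;                          /* this position contributes 0 */
            continue;
        }
        word = (int)((a >> bit) & ((1u << w) - 1)) + carry;
        carry = (word >> (w - 1)) & 1;      /* digit too large: push a carry up */
        word -= carry << w;                 /* ... and make this digit negative */
        wnaf[bit] = word;
        last = bit;
        bit += w;
    }
    return last + 1;
}

int main(void) {
    int wnaf[32], n, i;
    long back = 0;
    n = demo_wnaf(wnaf, 32, 0xB6E5u, 4);
    for (i = n - 1; i >= 0; i--) {
        back = back * 2 + wnaf[i];          /* Horner evaluation of sum(wnaf[i]*2^i) */
    }
    printf("%ld\n", back);                  /* prints 46821 == 0xB6E5 */
    assert(back == 0xB6E5L);
    return 0;
}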
|||
|
|||
static void secp256k1_ecmult(const secp256k1_ecmult_context_t *ctx, secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng) { |
|||
secp256k1_ge_t pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
secp256k1_ge_t tmpa; |
|||
secp256k1_fe_t Z; |
|||
#ifdef USE_ENDOMORPHISM |
|||
secp256k1_ge_t pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; |
|||
secp256k1_scalar_t na_1, na_lam; |
|||
/* Split G factors. */ |
|||
secp256k1_scalar_t ng_1, ng_128; |
|||
int wnaf_na_1[130]; |
|||
int wnaf_na_lam[130]; |
|||
int bits_na_1; |
|||
int bits_na_lam; |
|||
int wnaf_ng_1[129]; |
|||
int bits_ng_1; |
|||
int wnaf_ng_128[129]; |
|||
int bits_ng_128; |
|||
#else |
|||
int wnaf_na[256]; |
|||
int bits_na; |
|||
int wnaf_ng[256]; |
|||
int bits_ng; |
|||
#endif |
|||
int i; |
|||
int bits; |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ |
|||
secp256k1_scalar_split_lambda(&na_1, &na_lam, na); |
|||
|
|||
/* build wnaf representation for na_1 and na_lam. */ |
|||
bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, 130, &na_1, WINDOW_A); |
|||
bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, 130, &na_lam, WINDOW_A); |
|||
VERIFY_CHECK(bits_na_1 <= 130); |
|||
VERIFY_CHECK(bits_na_lam <= 130); |
|||
bits = bits_na_1; |
|||
if (bits_na_lam > bits) { |
|||
bits = bits_na_lam; |
|||
} |
|||
#else |
|||
/* build wnaf representation for na. */ |
|||
bits_na = secp256k1_ecmult_wnaf(wnaf_na, 256, na, WINDOW_A); |
|||
bits = bits_na; |
|||
#endif |
|||
|
|||
/* Calculate odd multiples of a.
|
|||
* All multiples are brought to the same Z 'denominator', which is stored |
|||
 * in Z. Due to secp256k1's isomorphism we can do all operations pretending |
|||
* that the Z coordinate was 1, use affine addition formulae, and correct |
|||
* the Z coordinate of the result once at the end. |
|||
* The exception is the precomputed G table points, which are actually |
|||
* affine. Compared to the base used for other points, they have a Z ratio |
|||
* of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same |
|||
* isomorphism to efficiently add with a known Z inverse. |
|||
*/ |
|||
secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, a); |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { |
|||
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); |
|||
} |
|||
|
|||
/* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */ |
|||
secp256k1_scalar_split_128(&ng_1, &ng_128, ng); |
|||
|
|||
/* Build wnaf representation for ng_1 and ng_128 */ |
|||
bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); |
|||
bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); |
|||
if (bits_ng_1 > bits) { |
|||
bits = bits_ng_1; |
|||
} |
|||
if (bits_ng_128 > bits) { |
|||
bits = bits_ng_128; |
|||
} |
|||
#else |
|||
bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); |
|||
if (bits_ng > bits) { |
|||
bits = bits_ng; |
|||
} |
|||
#endif |
|||
|
|||
secp256k1_gej_set_infinity(r); |
|||
|
|||
for (i = bits - 1; i >= 0; i--) { |
|||
int n; |
|||
secp256k1_gej_double_var(r, r, NULL); |
|||
#ifdef USE_ENDOMORPHISM |
|||
if (i < bits_na_1 && (n = wnaf_na_1[i])) { |
|||
ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); |
|||
secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); |
|||
} |
|||
if (i < bits_na_lam && (n = wnaf_na_lam[i])) { |
|||
ECMULT_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); |
|||
secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); |
|||
} |
|||
if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { |
|||
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); |
|||
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); |
|||
} |
|||
if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { |
|||
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); |
|||
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); |
|||
} |
|||
#else |
|||
if (i < bits_na && (n = wnaf_na[i])) { |
|||
ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); |
|||
secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); |
|||
} |
|||
if (i < bits_ng && (n = wnaf_ng[i])) { |
|||
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); |
|||
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); |
|||
} |
|||
#endif |
|||
} |
|||
|
|||
if (!r->infinity) { |
|||
secp256k1_fe_mul(&r->z, &r->z, &Z); |
|||
} |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,119 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_ |
|||
#define _SECP256K1_FIELD_ |
|||
|
|||
/** Field element module.
|
|||
* |
|||
* Field elements can be represented in several ways, but code accessing |
|||
 * it (and implementations) need to take certain properties into account: |
|||
* - Each field element can be normalized or not. |
|||
* - Each field element has a magnitude, which represents how far away |
|||
 * its representation is from normalization. Normalized elements |
|||
* always have a magnitude of 1, but a magnitude of 1 doesn't imply |
|||
* normality. |
|||
*/ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#if defined(USE_FIELD_10X26) |
|||
#include "field_10x26.h" |
|||
#elif defined(USE_FIELD_5X52) |
|||
#include "field_5x52.h" |
|||
#else |
|||
#error "Please select field implementation" |
|||
#endif |
|||
|
|||
/** Normalize a field element. */ |
|||
static void secp256k1_fe_normalize(secp256k1_fe_t *r); |
|||
|
|||
/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */ |
|||
static void secp256k1_fe_normalize_weak(secp256k1_fe_t *r); |
|||
|
|||
/** Normalize a field element, without constant-time guarantee. */ |
|||
static void secp256k1_fe_normalize_var(secp256k1_fe_t *r); |
|||
|
|||
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
|
|||
* implementation may optionally normalize the input, but this should not be relied upon. */ |
|||
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe_t *r); |
|||
|
|||
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
|
|||
* implementation may optionally normalize the input, but this should not be relied upon. */ |
|||
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe_t *r); |
|||
|
|||
/** Set a field element equal to a small integer. Resulting field element is normalized. */ |
|||
static void secp256k1_fe_set_int(secp256k1_fe_t *r, int a); |
|||
|
|||
/** Verify whether a field element is zero. Requires the input to be normalized. */ |
|||
static int secp256k1_fe_is_zero(const secp256k1_fe_t *a); |
|||
|
|||
/** Check the "oddness" of a field element. Requires the input to be normalized. */ |
|||
static int secp256k1_fe_is_odd(const secp256k1_fe_t *a); |
|||
|
|||
/** Compare two field elements. Requires magnitude-1 inputs. */ |
|||
static int secp256k1_fe_equal_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b); |
|||
|
|||
/** Compare two field elements. Requires both inputs to be normalized */ |
|||
static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b); |
|||
|
|||
/** Set a field element equal to a 32-byte big endian value. If successful, the resulting field element is normalized. */ |
|||
static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a); |
|||
|
|||
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ |
|||
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
|
|||
* as an argument. The magnitude of the output is one higher. */ |
|||
static void secp256k1_fe_negate(secp256k1_fe_t *r, const secp256k1_fe_t *a, int m); |
|||
|
|||
/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
|
|||
* small integer. */ |
|||
static void secp256k1_fe_mul_int(secp256k1_fe_t *r, int a); |
|||
|
|||
/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */ |
|||
static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
|
|||
* The output magnitude is 1 (but not guaranteed to be normalized). */ |
|||
static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b); |
|||
|
|||
/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
|
|||
* The output magnitude is 1 (but not guaranteed to be normalized). */ |
|||
static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Sets a field element to be the (modular) square root (if any exist) of another. Requires the
|
|||
* input's magnitude to be at most 8. The output magnitude is 1 (but not guaranteed to be |
|||
* normalized). Return value indicates whether a square root was found. */ |
|||
static int secp256k1_fe_sqrt_var(secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
|
|||
* at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */ |
|||
static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Potentially faster version of secp256k1_fe_inv, without constant-time guarantee. */ |
|||
static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
|
|||
* at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and |
|||
* outputs must not overlap in memory. */ |
|||
static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t *r, const secp256k1_fe_t *a); |
|||
|
|||
/** Convert a field element to the storage type. */ |
|||
static void secp256k1_fe_to_storage(secp256k1_fe_storage_t *r, const secp256k1_fe_t*); |
|||
|
|||
/** Convert a field element back from the storage type. */ |
|||
static void secp256k1_fe_from_storage(secp256k1_fe_t *r, const secp256k1_fe_storage_t*); |
|||
|
|||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */ |
|||
static void secp256k1_fe_storage_cmov(secp256k1_fe_storage_t *r, const secp256k1_fe_storage_t *a, int flag); |
|||
|
|||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */ |
|||
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag); |
|||
|
|||
#endif |
@ -0,0 +1,47 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_REPR_ |
|||
#define _SECP256K1_FIELD_REPR_ |
|||
|
|||
#include <stdint.h> |
|||
|
|||
typedef struct { |
|||
/* X = sum(i=0..9, elem[i]*2^26) mod n */ |
|||
uint32_t n[10]; |
|||
#ifdef VERIFY |
|||
int magnitude; |
|||
int normalized; |
|||
#endif |
|||
} secp256k1_fe_t; |
|||
|
|||
/* Unpacks a constant into an overlapping multi-limbed FE element. */ |
|||
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ |
|||
(d0) & 0x3FFFFFFUL, \ |
|||
(((uint32_t)d0) >> 26) | ((uint32_t)(d1) & 0xFFFFFUL) << 6, \ |
|||
(((uint32_t)d1) >> 20) | ((uint32_t)(d2) & 0x3FFFUL) << 12, \ |
|||
(((uint32_t)d2) >> 14) | ((uint32_t)(d3) & 0xFFUL) << 18, \ |
|||
(((uint32_t)d3) >> 8) | ((uint32_t)(d4) & 0x3UL) << 24, \ |
|||
(((uint32_t)d4) >> 2) & 0x3FFFFFFUL, \ |
|||
(((uint32_t)d4) >> 28) | ((uint32_t)(d5) & 0x3FFFFFUL) << 4, \ |
|||
(((uint32_t)d5) >> 22) | ((uint32_t)(d6) & 0xFFFFUL) << 10, \ |
|||
(((uint32_t)d6) >> 16) | ((uint32_t)(d7) & 0x3FFUL) << 16, \ |
|||
(((uint32_t)d7) >> 10) \ |
|||
} |
|||
|
|||
#ifdef VERIFY |
|||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1} |
|||
#else |
|||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))} |
|||
#endif |
|||
|
|||
typedef struct { |
|||
uint32_t n[8]; |
|||
} secp256k1_fe_storage_t; |
|||
|
|||
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }} |
|||
#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4], d.n[3], d.n[2], d.n[1], d.n[0] |
|||
#endif |
File diff suppressed because it is too large
@ -0,0 +1,47 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_REPR_ |
|||
#define _SECP256K1_FIELD_REPR_ |
|||
|
|||
#include <stdint.h> |
|||
|
|||
typedef struct { |
|||
/* X = sum(i=0..4, elem[i]*2^52) mod n */ |
|||
uint64_t n[5]; |
|||
#ifdef VERIFY |
|||
int magnitude; |
|||
int normalized; |
|||
#endif |
|||
} secp256k1_fe_t; |
|||
|
|||
/* Unpacks a constant into an overlapping multi-limbed FE element. */ |
|||
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ |
|||
(d0) | ((uint64_t)(d1) & 0xFFFFFUL) << 32, \ |
|||
((uint64_t)(d1) >> 20) | ((uint64_t)(d2)) << 12 | ((uint64_t)(d3) & 0xFFUL) << 44, \ |
|||
((uint64_t)(d3) >> 8) | ((uint64_t)(d4) & 0xFFFFFFFUL) << 24, \ |
|||
((uint64_t)(d4) >> 28) | ((uint64_t)(d5)) << 4 | ((uint64_t)(d6) & 0xFFFFUL) << 36, \ |
|||
((uint64_t)(d6) >> 16) | ((uint64_t)(d7)) << 16 \ |
|||
} |
|||
|
|||
#ifdef VERIFY |
|||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1} |
|||
#else |
|||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))} |
|||
#endif |
|||
|
|||
typedef struct { |
|||
uint64_t n[4]; |
|||
} secp256k1_fe_storage_t; |
|||
|
|||
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \ |
|||
(d0) | ((uint64_t)(d1)) << 32, \ |
|||
(d2) | ((uint64_t)(d3)) << 32, \ |
|||
(d4) | ((uint64_t)(d5)) << 32, \ |
|||
(d6) | ((uint64_t)(d7)) << 32 \ |
|||
}} |
|||
|
|||
#endif |
@ -0,0 +1,502 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013-2014 Diederik Huys, Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
/**
|
|||
* Changelog: |
|||
* - March 2013, Diederik Huys: original version |
|||
* - November 2014, Pieter Wuille: updated to use Peter Dettman's parallel multiplication algorithm |
|||
* - December 2014, Pieter Wuille: converted from YASM to GCC inline assembly |
|||
*/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ |
|||
#define _SECP256K1_FIELD_INNER5X52_IMPL_H_ |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { |
|||
/**
|
|||
* Registers: rdx:rax = multiplication accumulator |
|||
* r9:r8 = c |
|||
* r15:rcx = d |
|||
* r10-r14 = a0-a4 |
|||
* rbx = b |
|||
* rdi = r |
|||
* rsi = a / t? |
|||
*/ |
|||
uint64_t tmp1, tmp2, tmp3; |
|||
__asm__ __volatile__( |
|||
"movq 0(%%rsi),%%r10\n" |
|||
"movq 8(%%rsi),%%r11\n" |
|||
"movq 16(%%rsi),%%r12\n" |
|||
"movq 24(%%rsi),%%r13\n" |
|||
"movq 32(%%rsi),%%r14\n" |
|||
|
|||
/* d += a3 * b0 */ |
|||
"movq 0(%%rbx),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"movq %%rax,%%rcx\n" |
|||
"movq %%rdx,%%r15\n" |
|||
/* d += a2 * b1 */ |
|||
"movq 8(%%rbx),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a1 * b2 */ |
|||
"movq 16(%%rbx),%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d = a0 * b3 */ |
|||
"movq 24(%%rbx),%%rax\n" |
|||
"mulq %%r10\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* c = a4 * b4 */ |
|||
"movq 32(%%rbx),%%rax\n" |
|||
"mulq %%r14\n" |
|||
"movq %%rax,%%r8\n" |
|||
"movq %%rdx,%%r9\n" |
|||
/* d += (c & M) * R */ |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* c >>= 52 (%%r8 only) */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
/* t3 (tmp1) = d & M */ |
|||
"movq %%rcx,%%rsi\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rsi\n" |
|||
"movq %%rsi,%q1\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%r15,%%rcx\n" |
|||
"xorq %%r15,%%r15\n" |
|||
/* d += a4 * b0 */ |
|||
"movq 0(%%rbx),%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a3 * b1 */ |
|||
"movq 8(%%rbx),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a2 * b2 */ |
|||
"movq 16(%%rbx),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a1 * b3 */ |
|||
"movq 24(%%rbx),%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a0 * b4 */ |
|||
"movq 32(%%rbx),%%rax\n" |
|||
"mulq %%r10\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += c * R */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* t4 = d & M (%%rsi) */ |
|||
"movq %%rcx,%%rsi\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rsi\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%r15,%%rcx\n" |
|||
"xorq %%r15,%%r15\n" |
|||
/* tx = t4 >> 48 (tmp3) */ |
|||
"movq %%rsi,%%rax\n" |
|||
"shrq $48,%%rax\n" |
|||
"movq %%rax,%q3\n" |
|||
/* t4 &= (M >> 4) (tmp2) */ |
|||
"movq $0xffffffffffff,%%rax\n" |
|||
"andq %%rax,%%rsi\n" |
|||
"movq %%rsi,%q2\n" |
|||
/* c = a0 * b0 */ |
|||
"movq 0(%%rbx),%%rax\n" |
|||
"mulq %%r10\n" |
|||
"movq %%rax,%%r8\n" |
|||
"movq %%rdx,%%r9\n" |
|||
/* d += a4 * b1 */ |
|||
"movq 8(%%rbx),%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a3 * b2 */ |
|||
"movq 16(%%rbx),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a2 * b3 */ |
|||
"movq 24(%%rbx),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a1 * b4 */ |
|||
"movq 32(%%rbx),%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* u0 = d & M (%%rsi) */ |
|||
"movq %%rcx,%%rsi\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rsi\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%r15,%%rcx\n" |
|||
"xorq %%r15,%%r15\n" |
|||
/* u0 = (u0 << 4) | tx (%%rsi) */ |
|||
"shlq $4,%%rsi\n" |
|||
"movq %q3,%%rax\n" |
|||
"orq %%rax,%%rsi\n" |
|||
/* c += u0 * (R >> 4) */ |
|||
"movq $0x1000003d1,%%rax\n" |
|||
"mulq %%rsi\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* r[0] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq %%rax,0(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* c += a1 * b0 */ |
|||
"movq 0(%%rbx),%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* c += a0 * b1 */ |
|||
"movq 8(%%rbx),%%rax\n" |
|||
"mulq %%r10\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d += a4 * b2 */ |
|||
"movq 16(%%rbx),%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a3 * b3 */ |
|||
"movq 24(%%rbx),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a2 * b4 */ |
|||
"movq 32(%%rbx),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* c += (d & M) * R */ |
|||
"movq %%rcx,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%r15,%%rcx\n" |
|||
"xorq %%r15,%%r15\n" |
|||
/* r[1] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq %%rax,8(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* c += a2 * b0 */ |
|||
"movq 0(%%rbx),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* c += a1 * b1 */ |
|||
"movq 8(%%rbx),%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* c += a0 * b2 (last use of %%r10 = a0) */ |
|||
"movq 16(%%rbx),%%rax\n" |
|||
"mulq %%r10\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */ |
|||
"movq %q2,%%rsi\n" |
|||
"movq %q1,%%r10\n" |
|||
/* d += a4 * b3 */ |
|||
"movq 24(%%rbx),%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* d += a3 * b4 */ |
|||
"movq 32(%%rbx),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rcx\n" |
|||
"adcq %%rdx,%%r15\n" |
|||
/* c += (d & M) * R */ |
|||
"movq %%rcx,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d >>= 52 (%%rcx only) */ |
|||
"shrdq $52,%%r15,%%rcx\n" |
|||
/* r[2] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq %%rax,16(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* c += t3 */ |
|||
"addq %%r10,%%r8\n" |
|||
/* c += d * R */ |
|||
"movq %%rcx,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* r[3] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0xfffffffffffff,%%rdx\n" |
|||
"andq %%rdx,%%rax\n" |
|||
"movq %%rax,24(%%rdi)\n" |
|||
/* c >>= 52 (%%r8 only) */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
/* c += t4 (%%r8 only) */ |
|||
"addq %%rsi,%%r8\n" |
|||
/* r[4] = c */ |
|||
"movq %%r8,32(%%rdi)\n" |
|||
: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3) |
|||
: "b"(b), "D"(r) |
|||
: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" |
|||
); |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { |
|||
/**
|
|||
* Registers: rdx:rax = multiplication accumulator |
|||
* r9:r8 = c |
|||
* rcx:rbx = d |
|||
* r10-r14 = a0-a4 |
|||
* r15 = M (0xfffffffffffff) |
|||
* rdi = r |
|||
* rsi = a / t? |
|||
*/ |
|||
uint64_t tmp1, tmp2, tmp3; |
|||
__asm__ __volatile__( |
|||
"movq 0(%%rsi),%%r10\n" |
|||
"movq 8(%%rsi),%%r11\n" |
|||
"movq 16(%%rsi),%%r12\n" |
|||
"movq 24(%%rsi),%%r13\n" |
|||
"movq 32(%%rsi),%%r14\n" |
|||
"movq $0xfffffffffffff,%%r15\n" |
|||
|
|||
/* d = (a0*2) * a3 */ |
|||
"leaq (%%r10,%%r10,1),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"movq %%rax,%%rbx\n" |
|||
"movq %%rdx,%%rcx\n" |
|||
/* d += (a1*2) * a2 */ |
|||
"leaq (%%r11,%%r11,1),%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* c = a4 * a4 */ |
|||
"movq %%r14,%%rax\n" |
|||
"mulq %%r14\n" |
|||
"movq %%rax,%%r8\n" |
|||
"movq %%rdx,%%r9\n" |
|||
/* d += (c & M) * R */ |
|||
"andq %%r15,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* c >>= 52 (%%r8 only) */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
/* t3 (tmp1) = d & M */ |
|||
"movq %%rbx,%%rsi\n" |
|||
"andq %%r15,%%rsi\n" |
|||
"movq %%rsi,%q1\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%rcx,%%rbx\n" |
|||
"xorq %%rcx,%%rcx\n" |
|||
/* a4 *= 2 */ |
|||
"addq %%r14,%%r14\n" |
|||
/* d += a0 * a4 */ |
|||
"movq %%r10,%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* d+= (a1*2) * a3 */ |
|||
"leaq (%%r11,%%r11,1),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* d += a2 * a2 */ |
|||
"movq %%r12,%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* d += c * R */ |
|||
"movq %%r8,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* t4 = d & M (%%rsi) */ |
|||
"movq %%rbx,%%rsi\n" |
|||
"andq %%r15,%%rsi\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%rcx,%%rbx\n" |
|||
"xorq %%rcx,%%rcx\n" |
|||
/* tx = t4 >> 48 (tmp3) */ |
|||
"movq %%rsi,%%rax\n" |
|||
"shrq $48,%%rax\n" |
|||
"movq %%rax,%q3\n" |
|||
/* t4 &= (M >> 4) (tmp2) */ |
|||
"movq $0xffffffffffff,%%rax\n" |
|||
"andq %%rax,%%rsi\n" |
|||
"movq %%rsi,%q2\n" |
|||
/* c = a0 * a0 */ |
|||
"movq %%r10,%%rax\n" |
|||
"mulq %%r10\n" |
|||
"movq %%rax,%%r8\n" |
|||
"movq %%rdx,%%r9\n" |
|||
/* d += a1 * a4 */ |
|||
"movq %%r11,%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* d += (a2*2) * a3 */ |
|||
"leaq (%%r12,%%r12,1),%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* u0 = d & M (%%rsi) */ |
|||
"movq %%rbx,%%rsi\n" |
|||
"andq %%r15,%%rsi\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%rcx,%%rbx\n" |
|||
"xorq %%rcx,%%rcx\n" |
|||
/* u0 = (u0 << 4) | tx (%%rsi) */ |
|||
"shlq $4,%%rsi\n" |
|||
"movq %q3,%%rax\n" |
|||
"orq %%rax,%%rsi\n" |
|||
/* c += u0 * (R >> 4) */ |
|||
"movq $0x1000003d1,%%rax\n" |
|||
"mulq %%rsi\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* r[0] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq %%rax,0(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* a0 *= 2 */ |
|||
"addq %%r10,%%r10\n" |
|||
/* c += a0 * a1 */ |
|||
"movq %%r10,%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d += a2 * a4 */ |
|||
"movq %%r12,%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* d += a3 * a3 */ |
|||
"movq %%r13,%%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* c += (d & M) * R */ |
|||
"movq %%rbx,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d >>= 52 */ |
|||
"shrdq $52,%%rcx,%%rbx\n" |
|||
"xorq %%rcx,%%rcx\n" |
|||
/* r[1] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq %%rax,8(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* c += a0 * a2 (last use of %%r10) */ |
|||
"movq %%r10,%%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* fetch t3 (%%r10, overwrites a0),t4 (%%rsi) */ |
|||
"movq %q2,%%rsi\n" |
|||
"movq %q1,%%r10\n" |
|||
/* c += a1 * a1 */ |
|||
"movq %%r11,%%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d += a3 * a4 */ |
|||
"movq %%r13,%%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax,%%rbx\n" |
|||
"adcq %%rdx,%%rcx\n" |
|||
/* c += (d & M) * R */ |
|||
"movq %%rbx,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* d >>= 52 (%%rbx only) */ |
|||
"shrdq $52,%%rcx,%%rbx\n" |
|||
/* r[2] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq %%rax,16(%%rdi)\n" |
|||
/* c >>= 52 */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
"xorq %%r9,%%r9\n" |
|||
/* c += t3 */ |
|||
"addq %%r10,%%r8\n" |
|||
/* c += d * R */ |
|||
"movq %%rbx,%%rax\n" |
|||
"movq $0x1000003d10,%%rdx\n" |
|||
"mulq %%rdx\n" |
|||
"addq %%rax,%%r8\n" |
|||
"adcq %%rdx,%%r9\n" |
|||
/* r[3] = c & M */ |
|||
"movq %%r8,%%rax\n" |
|||
"andq %%r15,%%rax\n" |
|||
"movq %%rax,24(%%rdi)\n" |
|||
/* c >>= 52 (%%r8 only) */ |
|||
"shrdq $52,%%r9,%%r8\n" |
|||
/* c += t4 (%%r8 only) */ |
|||
"addq %%rsi,%%r8\n" |
|||
/* r[4] = c */ |
|||
"movq %%r8,32(%%rdi)\n" |
|||
: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3) |
|||
: "D"(r) |
|||
: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" |
|||
); |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,456 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_REPR_IMPL_H_ |
|||
#define _SECP256K1_FIELD_REPR_IMPL_H_ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#include <string.h> |
|||
#include "util.h" |
|||
#include "num.h" |
|||
#include "field.h" |
|||
|
|||
#if defined(USE_ASM_X86_64) |
|||
#include "field_5x52_asm_impl.h" |
|||
#else |
|||
#include "field_5x52_int128_impl.h" |
|||
#endif |
|||
|
|||
/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
|||
 * represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 bits each. In particular, |
|||
* each FieldElem has a 'magnitude' associated with it. Internally, a magnitude M means each element |
|||
* is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations |
|||
* accept any input with magnitude at most M, and have different rules for propagating magnitude to their |
|||
* output. |
|||
*/ |
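/* Illustrative sketch (hypothetical helper, not part of the library): how the
 * 5 limbs of a normalized element map back to 4 raw 64-bit words, assuming the
 * base-2^52 layout described above.  It mirrors secp256k1_fe_to_storage
 * further down and exists only to make the limb boundaries concrete. */
static SECP256K1_INLINE void secp256k1_fe_sketch_recombine(uint64_t w[4], const secp256k1_fe_t *a) {
    w[0] = a->n[0] | (a->n[1] << 52);             /* bits   0..63  */
    w[1] = (a->n[1] >> 12) | (a->n[2] << 40);     /* bits  64..127 */
    w[2] = (a->n[2] >> 24) | (a->n[3] << 28);     /* bits 128..191 */
    w[3] = (a->n[3] >> 36) | (a->n[4] << 16);     /* bits 192..255 */
}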
|||
|
|||
#ifdef VERIFY |
|||
static void secp256k1_fe_verify(const secp256k1_fe_t *a) { |
|||
const uint64_t *d = a->n; |
|||
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; |
|||
/* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ |
|||
r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m); |
|||
r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m); |
|||
r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m); |
|||
r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m); |
|||
r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m); |
|||
r &= (a->magnitude >= 0); |
|||
r &= (a->magnitude <= 2048); |
|||
if (a->normalized) { |
|||
r &= (a->magnitude <= 1); |
|||
if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) { |
|||
r &= (d[0] < 0xFFFFEFFFFFC2FULL); |
|||
} |
|||
} |
|||
VERIFY_CHECK(r == 1); |
|||
} |
|||
#else |
|||
static void secp256k1_fe_verify(const secp256k1_fe_t *a) { |
|||
(void)a; |
|||
} |
|||
#endif |
|||
|
|||
static void secp256k1_fe_normalize(secp256k1_fe_t *r) { |
|||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; |
|||
|
|||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */ |
|||
uint64_t m; |
|||
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
/* The first pass ensures the magnitude is 1, ... */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3; |
|||
|
|||
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ |
|||
VERIFY_CHECK(t4 >> 49 == 0); |
|||
|
|||
/* At most a single final reduction is needed; check if the value is >= the field characteristic */ |
|||
x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL) |
|||
& (t0 >= 0xFFFFEFFFFFC2FULL)); |
|||
|
|||
/* Apply the final reduction (for constant-time behaviour, we do it always) */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; |
|||
|
|||
/* If t4 didn't carry to bit 48 already, then it should have after any final reduction */ |
|||
VERIFY_CHECK(t4 >> 48 == x); |
|||
|
|||
/* Mask off the possible multiple of 2^256 from the final reduction */ |
|||
t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; |
|||
|
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 1; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
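/* Illustrative check (hypothetical helper, not library code): the folding
 * constant 0x1000003D1 used in the passes above is 2^256 mod p.  Since
 * p = 2^256 - 2^32 - 977, a carry x at bit 256 can be re-added to the low
 * limb as x * (2^32 + 977) without changing the value mod p. */
static SECP256K1_INLINE void secp256k1_fe_sketch_check_fold_constant(void) {
    VERIFY_CHECK(0x1000003D1ULL == ((uint64_t)1 << 32) + 977);
}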
|||
|
|||
static void secp256k1_fe_normalize_weak(secp256k1_fe_t *r) { |
|||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; |
|||
|
|||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */ |
|||
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
/* The first pass ensures the magnitude is 1, ... */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; |
|||
|
|||
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ |
|||
VERIFY_CHECK(t4 >> 49 == 0); |
|||
|
|||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; |
|||
|
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_fe_normalize_var(secp256k1_fe_t *r) { |
|||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; |
|||
|
|||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */ |
|||
uint64_t m; |
|||
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
/* The first pass ensures the magnitude is 1, ... */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3; |
|||
|
|||
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ |
|||
VERIFY_CHECK(t4 >> 49 == 0); |
|||
|
|||
/* At most a single final reduction is needed; check if the value is >= the field characteristic */ |
|||
x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL) |
|||
& (t0 >= 0xFFFFEFFFFFC2FULL)); |
|||
|
|||
if (x) { |
|||
t0 += 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; |
|||
|
|||
/* If t4 didn't carry to bit 48 already, then it should have after any final reduction */ |
|||
VERIFY_CHECK(t4 >> 48 == x); |
|||
|
|||
/* Mask off the possible multiple of 2^256 from the final reduction */ |
|||
t4 &= 0x0FFFFFFFFFFFFULL; |
|||
} |
|||
|
|||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; |
|||
|
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 1; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe_t *r) { |
|||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; |
|||
|
|||
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ |
|||
uint64_t z0, z1; |
|||
|
|||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */ |
|||
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
/* The first pass ensures the magnitude is 1, ... */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3; |
|||
z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL; |
|||
|
|||
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ |
|||
VERIFY_CHECK(t4 >> 49 == 0); |
|||
|
|||
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); |
|||
} |
|||
|
|||
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe_t *r) { |
|||
uint64_t t0, t1, t2, t3, t4; |
|||
uint64_t z0, z1; |
|||
uint64_t x; |
|||
|
|||
t0 = r->n[0]; |
|||
t4 = r->n[4]; |
|||
|
|||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */ |
|||
x = t4 >> 48; |
|||
|
|||
/* The first pass ensures the magnitude is 1, ... */ |
|||
t0 += x * 0x1000003D1ULL; |
|||
|
|||
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ |
|||
z0 = t0 & 0xFFFFFFFFFFFFFULL; |
|||
z1 = z0 ^ 0x1000003D0ULL; |
|||
|
|||
/* Fast return path should catch the majority of cases */ |
|||
if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) { |
|||
return 0; |
|||
} |
|||
|
|||
t1 = r->n[1]; |
|||
t2 = r->n[2]; |
|||
t3 = r->n[3]; |
|||
|
|||
t4 &= 0x0FFFFFFFFFFFFULL; |
|||
|
|||
t1 += (t0 >> 52); t0 = z0; |
|||
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1; |
|||
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2; |
|||
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3; |
|||
z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL; |
|||
|
|||
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ |
|||
VERIFY_CHECK(t4 >> 49 == 0); |
|||
|
|||
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe_t *r, int a) { |
|||
r->n[0] = a; |
|||
r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; |
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 1; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe_t *a) { |
|||
const uint64_t *t = a->n; |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->normalized); |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe_t *a) { |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->normalized); |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
return a->n[0] & 1; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe_t *a) { |
|||
int i; |
|||
#ifdef VERIFY |
|||
a->magnitude = 0; |
|||
a->normalized = 1; |
|||
#endif |
|||
for (i=0; i<5; i++) { |
|||
a->n[i] = 0; |
|||
} |
|||
} |
|||
|
|||
static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) { |
|||
int i; |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->normalized); |
|||
VERIFY_CHECK(b->normalized); |
|||
secp256k1_fe_verify(a); |
|||
secp256k1_fe_verify(b); |
|||
#endif |
|||
for (i = 4; i >= 0; i--) { |
|||
if (a->n[i] > b->n[i]) { |
|||
return 1; |
|||
} |
|||
if (a->n[i] < b->n[i]) { |
|||
return -1; |
|||
} |
|||
} |
|||
return 0; |
|||
} |
|||
|
|||
static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) { |
|||
int i; |
|||
r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; |
|||
for (i=0; i<32; i++) { |
|||
int j; |
|||
for (j=0; j<2; j++) { |
|||
int limb = (8*i+4*j)/52; |
|||
int shift = (8*i+4*j)%52; |
|||
r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift; |
|||
} |
|||
} |
|||
if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) { |
|||
return 0; |
|||
} |
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 1; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
return 1; |
|||
} |
|||
|
|||
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ |
|||
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe_t *a) { |
|||
int i; |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->normalized); |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
for (i=0; i<32; i++) { |
|||
int j; |
|||
int c = 0; |
|||
for (j=0; j<2; j++) { |
|||
int limb = (8*i+4*j)/52; |
|||
int shift = (8*i+4*j)%52; |
|||
c |= ((a->n[limb] >> shift) & 0xF) << (4 * j); |
|||
} |
|||
r[31-i] = c; |
|||
} |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe_t *r, const secp256k1_fe_t *a, int m) { |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->magnitude <= m); |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0]; |
|||
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1]; |
|||
r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2]; |
|||
r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3]; |
|||
r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4]; |
|||
#ifdef VERIFY |
|||
r->magnitude = m + 1; |
|||
r->normalized = 0; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe_t *r, int a) { |
|||
r->n[0] *= a; |
|||
r->n[1] *= a; |
|||
r->n[2] *= a; |
|||
r->n[3] *= a; |
|||
r->n[4] *= a; |
|||
#ifdef VERIFY |
|||
r->magnitude *= a; |
|||
r->normalized = 0; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
#ifdef VERIFY |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
r->n[0] += a->n[0]; |
|||
r->n[1] += a->n[1]; |
|||
r->n[2] += a->n[2]; |
|||
r->n[3] += a->n[3]; |
|||
r->n[4] += a->n[4]; |
|||
#ifdef VERIFY |
|||
r->magnitude += a->magnitude; |
|||
r->normalized = 0; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) { |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->magnitude <= 8); |
|||
VERIFY_CHECK(b->magnitude <= 8); |
|||
secp256k1_fe_verify(a); |
|||
secp256k1_fe_verify(b); |
|||
VERIFY_CHECK(r != b); |
|||
#endif |
|||
secp256k1_fe_mul_inner(r->n, a->n, b->n); |
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 0; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->magnitude <= 8); |
|||
secp256k1_fe_verify(a); |
|||
#endif |
|||
secp256k1_fe_sqr_inner(r->n, a->n); |
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 0; |
|||
secp256k1_fe_verify(r); |
|||
#endif |
|||
} |
|||
|
|||
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) { |
|||
uint64_t mask0, mask1; |
|||
mask0 = flag + ~((uint64_t)0); |
|||
mask1 = ~mask0; |
|||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); |
|||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); |
|||
r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); |
|||
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); |
|||
r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1); |
|||
#ifdef VERIFY |
|||
if (a->magnitude > r->magnitude) { |
|||
r->magnitude = a->magnitude; |
|||
} |
|||
r->normalized &= a->normalized; |
|||
#endif |
|||
} |
|||
|
|||
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage_t *r, const secp256k1_fe_storage_t *a, int flag) { |
|||
uint64_t mask0, mask1; |
|||
mask0 = flag + ~((uint64_t)0); |
|||
mask1 = ~mask0; |
|||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); |
|||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); |
|||
r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); |
|||
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); |
|||
} |
|||
|
|||
static void secp256k1_fe_to_storage(secp256k1_fe_storage_t *r, const secp256k1_fe_t *a) { |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK(a->normalized); |
|||
#endif |
|||
r->n[0] = a->n[0] | a->n[1] << 52; |
|||
r->n[1] = a->n[1] >> 12 | a->n[2] << 40; |
|||
r->n[2] = a->n[2] >> 24 | a->n[3] << 28; |
|||
r->n[3] = a->n[3] >> 36 | a->n[4] << 16; |
|||
} |
|||
|
|||
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe_t *r, const secp256k1_fe_storage_t *a) { |
|||
r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; |
|||
r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); |
|||
r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); |
|||
r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL); |
|||
r->n[4] = a->n[3] >> 16; |
|||
#ifdef VERIFY |
|||
r->magnitude = 1; |
|||
r->normalized = 1; |
|||
#endif |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,277 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ |
|||
#define _SECP256K1_FIELD_INNER5X52_IMPL_H_ |
|||
|
|||
#include <stdint.h> |
|||
|
|||
#ifdef VERIFY |
|||
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) |
|||
#else |
|||
#define VERIFY_BITS(x, n) do { } while(0) |
|||
#endif |
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { |
|||
uint128_t c, d; |
|||
uint64_t t3, t4, tx, u0; |
|||
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; |
|||
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; |
|||
|
|||
VERIFY_BITS(a[0], 56); |
|||
VERIFY_BITS(a[1], 56); |
|||
VERIFY_BITS(a[2], 56); |
|||
VERIFY_BITS(a[3], 56); |
|||
VERIFY_BITS(a[4], 52); |
|||
VERIFY_BITS(b[0], 56); |
|||
VERIFY_BITS(b[1], 56); |
|||
VERIFY_BITS(b[2], 56); |
|||
VERIFY_BITS(b[3], 56); |
|||
VERIFY_BITS(b[4], 52); |
|||
VERIFY_CHECK(r != b); |
|||
|
|||
/* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
|||
* px is a shorthand for sum(a[i]*b[x-i], i=0..x). |
|||
* Note that [x 0 0 0 0 0] = [x*R]. |
|||
*/ |
|||
|
|||
d = (uint128_t)a0 * b[3] |
|||
+ (uint128_t)a1 * b[2] |
|||
+ (uint128_t)a2 * b[1] |
|||
+ (uint128_t)a3 * b[0]; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 0 0] = [p3 0 0 0] */ |
|||
c = (uint128_t)a4 * b[4]; |
|||
VERIFY_BITS(c, 112); |
|||
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
d += (c & M) * R; c >>= 52; |
|||
VERIFY_BITS(d, 115); |
|||
VERIFY_BITS(c, 60); |
|||
/* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
t3 = d & M; d >>= 52; |
|||
VERIFY_BITS(t3, 52); |
|||
VERIFY_BITS(d, 63); |
|||
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
|
|||
d += (uint128_t)a0 * b[4] |
|||
+ (uint128_t)a1 * b[3] |
|||
+ (uint128_t)a2 * b[2] |
|||
+ (uint128_t)a3 * b[1] |
|||
+ (uint128_t)a4 * b[0]; |
|||
VERIFY_BITS(d, 115); |
|||
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
d += c * R; |
|||
VERIFY_BITS(d, 116); |
|||
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
t4 = d & M; d >>= 52; |
|||
VERIFY_BITS(t4, 52); |
|||
VERIFY_BITS(d, 64); |
|||
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
tx = (t4 >> 48); t4 &= (M >> 4); |
|||
VERIFY_BITS(tx, 4); |
|||
VERIFY_BITS(t4, 48); |
|||
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
|
|||
c = (uint128_t)a0 * b[0]; |
|||
VERIFY_BITS(c, 112); |
|||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ |
|||
d += (uint128_t)a1 * b[4] |
|||
+ (uint128_t)a2 * b[3] |
|||
+ (uint128_t)a3 * b[2] |
|||
+ (uint128_t)a4 * b[1]; |
|||
VERIFY_BITS(d, 115); |
|||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
u0 = d & M; d >>= 52; |
|||
VERIFY_BITS(u0, 52); |
|||
VERIFY_BITS(d, 63); |
|||
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
/* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
u0 = (u0 << 4) | tx; |
|||
VERIFY_BITS(u0, 56); |
|||
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
c += (uint128_t)u0 * (R >> 4); |
|||
VERIFY_BITS(c, 115); |
|||
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
r[0] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[0], 52); |
|||
VERIFY_BITS(c, 61); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
|
|||
c += (uint128_t)a0 * b[1] |
|||
+ (uint128_t)a1 * b[0]; |
|||
VERIFY_BITS(c, 114); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ |
|||
d += (uint128_t)a2 * b[4] |
|||
+ (uint128_t)a3 * b[3] |
|||
+ (uint128_t)a4 * b[2]; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
c += (d & M) * R; d >>= 52; |
|||
VERIFY_BITS(c, 115); |
|||
VERIFY_BITS(d, 62); |
|||
/* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
r[1] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[1], 52); |
|||
VERIFY_BITS(c, 63); |
|||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
|
|||
c += (uint128_t)a0 * b[2] |
|||
+ (uint128_t)a1 * b[1] |
|||
+ (uint128_t)a2 * b[0]; |
|||
VERIFY_BITS(c, 114); |
|||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ |
|||
d += (uint128_t)a3 * b[4] |
|||
+ (uint128_t)a4 * b[3]; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
c += (d & M) * R; d >>= 52; |
|||
VERIFY_BITS(c, 115); |
|||
VERIFY_BITS(d, 62); |
|||
/* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
|
|||
/* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[2] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[2], 52); |
|||
VERIFY_BITS(c, 63); |
|||
/* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
c += d * R + t3; |
|||
VERIFY_BITS(c, 100); |
|||
/* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[3] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[3], 52); |
|||
VERIFY_BITS(c, 48); |
|||
/* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
c += t4; |
|||
VERIFY_BITS(c, 49); |
|||
/* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[4] = c; |
|||
VERIFY_BITS(r[4], 49); |
|||
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
} |
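/* Illustrative check (hypothetical helper, not library code): the constant
 * R = 0x1000003D10 used above is 2^260 mod p.  A carry out of the top limb
 * sits at bit 5*52 = 260, and 2^260 = 16 * 2^256 = 16 * (2^32 + 977) mod p,
 * which is why such carries are folded back in as (value & M) * R, and why
 * u0 is later multiplied by R >> 4 = 0x1000003D1. */
static SECP256K1_INLINE void secp256k1_fe_sketch_check_reduction_constant(void) {
    VERIFY_CHECK(0x1000003D10ULL == 16 * (((uint64_t)1 << 32) + 977));
}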
|||
|
|||
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { |
|||
uint128_t c, d; |
|||
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; |
|||
uint64_t t3, t4, tx, u0; |
|||
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; |
|||
|
|||
VERIFY_BITS(a[0], 56); |
|||
VERIFY_BITS(a[1], 56); |
|||
VERIFY_BITS(a[2], 56); |
|||
VERIFY_BITS(a[3], 56); |
|||
VERIFY_BITS(a[4], 52); |
|||
|
|||
/** [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
|||
* px is a shorthand for sum(a[i]*a[x-i], i=0..x). |
|||
* Note that [x 0 0 0 0 0] = [x*R]. |
|||
*/ |
|||
|
|||
d = (uint128_t)(a0*2) * a3 |
|||
+ (uint128_t)(a1*2) * a2; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 0 0] = [p3 0 0 0] */ |
|||
c = (uint128_t)a4 * a4; |
|||
VERIFY_BITS(c, 112); |
|||
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
d += (c & M) * R; c >>= 52; |
|||
VERIFY_BITS(d, 115); |
|||
VERIFY_BITS(c, 60); |
|||
/* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
t3 = d & M; d >>= 52; |
|||
VERIFY_BITS(t3, 52); |
|||
VERIFY_BITS(d, 63); |
|||
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ |
|||
|
|||
a4 *= 2; |
|||
d += (uint128_t)a0 * a4 |
|||
+ (uint128_t)(a1*2) * a3 |
|||
+ (uint128_t)a2 * a2; |
|||
VERIFY_BITS(d, 115); |
|||
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
d += c * R; |
|||
VERIFY_BITS(d, 116); |
|||
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
t4 = d & M; d >>= 52; |
|||
VERIFY_BITS(t4, 52); |
|||
VERIFY_BITS(d, 64); |
|||
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
tx = (t4 >> 48); t4 &= (M >> 4); |
|||
VERIFY_BITS(tx, 4); |
|||
VERIFY_BITS(t4, 48); |
|||
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ |
|||
|
|||
c = (uint128_t)a0 * a0; |
|||
VERIFY_BITS(c, 112); |
|||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ |
|||
d += (uint128_t)a1 * a4 |
|||
+ (uint128_t)(a2*2) * a3; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
u0 = d & M; d >>= 52; |
|||
VERIFY_BITS(u0, 52); |
|||
VERIFY_BITS(d, 62); |
|||
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
/* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
u0 = (u0 << 4) | tx; |
|||
VERIFY_BITS(u0, 56); |
|||
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
c += (uint128_t)u0 * (R >> 4); |
|||
VERIFY_BITS(c, 113); |
|||
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
r[0] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[0], 52); |
|||
VERIFY_BITS(c, 61); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ |
|||
|
|||
a0 *= 2; |
|||
c += (uint128_t)a0 * a1; |
|||
VERIFY_BITS(c, 114); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ |
|||
d += (uint128_t)a2 * a4 |
|||
+ (uint128_t)a3 * a3; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
c += (d & M) * R; d >>= 52; |
|||
VERIFY_BITS(c, 115); |
|||
VERIFY_BITS(d, 62); |
|||
/* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
r[1] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[1], 52); |
|||
VERIFY_BITS(c, 63); |
|||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ |
|||
|
|||
c += (uint128_t)a0 * a2 |
|||
+ (uint128_t)a1 * a1; |
|||
VERIFY_BITS(c, 114); |
|||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ |
|||
d += (uint128_t)a3 * a4; |
|||
VERIFY_BITS(d, 114); |
|||
/* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
c += (d & M) * R; d >>= 52; |
|||
VERIFY_BITS(c, 115); |
|||
VERIFY_BITS(d, 62); |
|||
/* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[2] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[2], 52); |
|||
VERIFY_BITS(c, 63); |
|||
/* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
|
|||
c += d * R + t3; |
|||
VERIFY_BITS(c, 100); |
|||
/* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[3] = c & M; c >>= 52; |
|||
VERIFY_BITS(r[3], 52); |
|||
VERIFY_BITS(c, 48); |
|||
/* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
c += t4; |
|||
VERIFY_BITS(c, 49); |
|||
/* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
r[4] = c; |
|||
VERIFY_BITS(r[4], 49); |
|||
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,271 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_FIELD_IMPL_H_ |
|||
#define _SECP256K1_FIELD_IMPL_H_ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#include "util.h" |
|||
|
|||
#if defined(USE_FIELD_10X26) |
|||
#include "field_10x26_impl.h" |
|||
#elif defined(USE_FIELD_5X52) |
|||
#include "field_5x52_impl.h" |
|||
#else |
|||
#error "Please select field implementation" |
|||
#endif |
|||
|
|||
SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) { |
|||
secp256k1_fe_t na; |
|||
secp256k1_fe_negate(&na, a, 1); |
|||
secp256k1_fe_add(&na, b); |
|||
return secp256k1_fe_normalizes_to_zero_var(&na); |
|||
} |
|||
|
|||
static int secp256k1_fe_sqrt_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
secp256k1_fe_t x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; |
|||
int j; |
|||
|
|||
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
|||
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: |
|||
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] |
|||
*/ |
|||
|
|||
secp256k1_fe_sqr(&x2, a); |
|||
secp256k1_fe_mul(&x2, &x2, a); |
|||
|
|||
secp256k1_fe_sqr(&x3, &x2); |
|||
secp256k1_fe_mul(&x3, &x3, a); |
|||
|
|||
x6 = x3; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x6, &x6); |
|||
} |
|||
secp256k1_fe_mul(&x6, &x6, &x3); |
|||
|
|||
x9 = x6; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x9, &x9); |
|||
} |
|||
secp256k1_fe_mul(&x9, &x9, &x3); |
|||
|
|||
x11 = x9; |
|||
for (j=0; j<2; j++) { |
|||
secp256k1_fe_sqr(&x11, &x11); |
|||
} |
|||
secp256k1_fe_mul(&x11, &x11, &x2); |
|||
|
|||
x22 = x11; |
|||
for (j=0; j<11; j++) { |
|||
secp256k1_fe_sqr(&x22, &x22); |
|||
} |
|||
secp256k1_fe_mul(&x22, &x22, &x11); |
|||
|
|||
x44 = x22; |
|||
for (j=0; j<22; j++) { |
|||
secp256k1_fe_sqr(&x44, &x44); |
|||
} |
|||
secp256k1_fe_mul(&x44, &x44, &x22); |
|||
|
|||
x88 = x44; |
|||
for (j=0; j<44; j++) { |
|||
secp256k1_fe_sqr(&x88, &x88); |
|||
} |
|||
secp256k1_fe_mul(&x88, &x88, &x44); |
|||
|
|||
x176 = x88; |
|||
for (j=0; j<88; j++) { |
|||
secp256k1_fe_sqr(&x176, &x176); |
|||
} |
|||
secp256k1_fe_mul(&x176, &x176, &x88); |
|||
|
|||
x220 = x176; |
|||
for (j=0; j<44; j++) { |
|||
secp256k1_fe_sqr(&x220, &x220); |
|||
} |
|||
secp256k1_fe_mul(&x220, &x220, &x44); |
|||
|
|||
x223 = x220; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x223, &x223); |
|||
} |
|||
secp256k1_fe_mul(&x223, &x223, &x3); |
|||
|
|||
/* The final result is then assembled using a sliding window over the blocks. */ |
|||
|
|||
t1 = x223; |
|||
for (j=0; j<23; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(&t1, &t1, &x22); |
|||
for (j=0; j<6; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(&t1, &t1, &x2); |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
secp256k1_fe_sqr(r, &t1); |
|||
|
|||
/* Check that a square root was actually calculated */ |
|||
|
|||
secp256k1_fe_sqr(&t1, r); |
|||
return secp256k1_fe_equal_var(&t1, a); |
|||
} |
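/* Illustrative usage sketch (hypothetical helper, not library code): recover a
 * candidate y coordinate for a given x on y^2 = x^3 + 7 using the square root
 * above.  Returns 1 iff x is the abscissa of a curve point; this is the same
 * pattern secp256k1_ge_set_xo_var follows in group_impl.h. */
static int secp256k1_fe_sketch_y_from_x(secp256k1_fe_t *y, const secp256k1_fe_t *x) {
    secp256k1_fe_t x2, x3, c;
    secp256k1_fe_sqr(&x2, x);              /* x^2 */
    secp256k1_fe_mul(&x3, x, &x2);         /* x^3 */
    secp256k1_fe_set_int(&c, 7);
    secp256k1_fe_add(&c, &x3);             /* x^3 + 7 */
    return secp256k1_fe_sqrt_var(y, &c);   /* y = sqrt(x^3 + 7), if one exists */
}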
|||
|
|||
static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
secp256k1_fe_t x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; |
|||
int j; |
|||
|
|||
/** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
|||
* { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: |
|||
* [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] |
|||
*/ |
|||
|
|||
secp256k1_fe_sqr(&x2, a); |
|||
secp256k1_fe_mul(&x2, &x2, a); |
|||
|
|||
secp256k1_fe_sqr(&x3, &x2); |
|||
secp256k1_fe_mul(&x3, &x3, a); |
|||
|
|||
x6 = x3; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x6, &x6); |
|||
} |
|||
secp256k1_fe_mul(&x6, &x6, &x3); |
|||
|
|||
x9 = x6; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x9, &x9); |
|||
} |
|||
secp256k1_fe_mul(&x9, &x9, &x3); |
|||
|
|||
x11 = x9; |
|||
for (j=0; j<2; j++) { |
|||
secp256k1_fe_sqr(&x11, &x11); |
|||
} |
|||
secp256k1_fe_mul(&x11, &x11, &x2); |
|||
|
|||
x22 = x11; |
|||
for (j=0; j<11; j++) { |
|||
secp256k1_fe_sqr(&x22, &x22); |
|||
} |
|||
secp256k1_fe_mul(&x22, &x22, &x11); |
|||
|
|||
x44 = x22; |
|||
for (j=0; j<22; j++) { |
|||
secp256k1_fe_sqr(&x44, &x44); |
|||
} |
|||
secp256k1_fe_mul(&x44, &x44, &x22); |
|||
|
|||
x88 = x44; |
|||
for (j=0; j<44; j++) { |
|||
secp256k1_fe_sqr(&x88, &x88); |
|||
} |
|||
secp256k1_fe_mul(&x88, &x88, &x44); |
|||
|
|||
x176 = x88; |
|||
for (j=0; j<88; j++) { |
|||
secp256k1_fe_sqr(&x176, &x176); |
|||
} |
|||
secp256k1_fe_mul(&x176, &x176, &x88); |
|||
|
|||
x220 = x176; |
|||
for (j=0; j<44; j++) { |
|||
secp256k1_fe_sqr(&x220, &x220); |
|||
} |
|||
secp256k1_fe_mul(&x220, &x220, &x44); |
|||
|
|||
x223 = x220; |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&x223, &x223); |
|||
} |
|||
secp256k1_fe_mul(&x223, &x223, &x3); |
|||
|
|||
/* The final result is then assembled using a sliding window over the blocks. */ |
|||
|
|||
t1 = x223; |
|||
for (j=0; j<23; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(&t1, &t1, &x22); |
|||
for (j=0; j<5; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(&t1, &t1, a); |
|||
for (j=0; j<3; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(&t1, &t1, &x2); |
|||
for (j=0; j<2; j++) { |
|||
secp256k1_fe_sqr(&t1, &t1); |
|||
} |
|||
secp256k1_fe_mul(r, a, &t1); |
|||
} |
|||
|
|||
static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
#if defined(USE_FIELD_INV_BUILTIN) |
|||
secp256k1_fe_inv(r, a); |
|||
#elif defined(USE_FIELD_INV_NUM) |
|||
secp256k1_num_t n, m; |
|||
static const secp256k1_fe_t negone = SECP256K1_FE_CONST( |
|||
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
|||
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E |
|||
); |
|||
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ |
|||
static const unsigned char prime[32] = { |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F |
|||
}; |
|||
unsigned char b[32]; |
|||
secp256k1_fe_t c = *a; |
|||
secp256k1_fe_normalize_var(&c); |
|||
secp256k1_fe_get_b32(b, &c); |
|||
secp256k1_num_set_bin(&n, b, 32); |
|||
secp256k1_num_set_bin(&m, prime, 32); |
|||
secp256k1_num_mod_inverse(&n, &n, &m); |
|||
secp256k1_num_get_bin(b, 32, &n); |
|||
VERIFY_CHECK(secp256k1_fe_set_b32(r, b)); |
|||
/* Verify the result is the (unique) valid inverse using non-GMP code. */ |
|||
secp256k1_fe_mul(&c, &c, r); |
|||
secp256k1_fe_add(&c, &negone); |
|||
CHECK(secp256k1_fe_normalizes_to_zero_var(&c)); |
|||
#else |
|||
#error "Please select field inverse implementation" |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t *r, const secp256k1_fe_t *a) { |
|||
secp256k1_fe_t u; |
|||
size_t i; |
|||
if (len < 1) { |
|||
return; |
|||
} |
|||
|
|||
VERIFY_CHECK((r + len <= a) || (a + len <= r)); |
|||
|
|||
r[0] = a[0]; |
|||
|
|||
i = 0; |
|||
while (++i < len) { |
|||
secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]); |
|||
} |
|||
|
|||
secp256k1_fe_inv_var(&u, &r[--i]); |
|||
|
|||
while (i > 0) { |
|||
int j = i--; |
|||
secp256k1_fe_mul(&r[j], &r[i], &u); |
|||
secp256k1_fe_mul(&u, &u, &a[j]); |
|||
} |
|||
|
|||
r[0] = u; |
|||
} |
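/* Illustrative usage sketch (hypothetical helper, not library code): the
 * routine above is Montgomery's batch-inversion trick -- build the prefix
 * products r[i] = a[0]*...*a[i], invert only the last one, then walk
 * backwards to peel off each individual inverse, so n inversions cost one
 * secp256k1_fe_inv_var plus roughly 3n multiplications. */
static void secp256k1_fe_sketch_batch_invert3(secp256k1_fe_t out[3], const secp256k1_fe_t in[3]) {
    secp256k1_fe_inv_all_var(3, out, in);  /* out[i] = in[i]^-1 */
}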
|||
|
|||
#endif |
@ -0,0 +1,74 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014, 2015 Thomas Daede, Cory Fields * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#define USE_BASIC_CONFIG 1 |
|||
|
|||
#include "basic-config.h" |
|||
#include "include/secp256k1.h" |
|||
#include "field_impl.h" |
|||
#include "scalar_impl.h" |
|||
#include "group_impl.h" |
|||
#include "ecmult_gen_impl.h" |
|||
|
|||
static void default_error_callback_fn(const char* str, void* data) { |
|||
(void)data; |
|||
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); |
|||
abort(); |
|||
} |
|||
|
|||
static const callback_t default_error_callback = { |
|||
default_error_callback_fn, |
|||
NULL |
|||
}; |
|||
|
|||
int main(int argc, char **argv) { |
|||
secp256k1_ecmult_gen_context_t ctx; |
|||
int inner; |
|||
int outer; |
|||
FILE* fp; |
|||
|
|||
(void)argc; |
|||
(void)argv; |
|||
|
|||
fp = fopen("src/ecmult_static_context.h","w"); |
|||
if (fp == NULL) { |
|||
fprintf(stderr, "Could not open src/ecmult_static_context.h for writing!\n"); |
|||
return -1; |
|||
} |
|||
|
|||
fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); |
|||
fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); |
|||
fprintf(fp, "#include \"group.h\"\n"); |
|||
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n"); |
|||
fprintf(fp, "static const secp256k1_ge_storage_t secp256k1_ecmult_static_context[64][16] = {\n"); |
|||
|
|||
secp256k1_ecmult_gen_context_init(&ctx); |
|||
secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback); |
|||
for(outer = 0; outer != 64; outer++) { |
|||
fprintf(fp,"{\n"); |
|||
for(inner = 0; inner != 16; inner++) { |
|||
fprintf(fp," SC(%uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu)", SECP256K1_GE_STORAGE_CONST_GET((*ctx.prec)[outer][inner])); |
|||
if (inner != 15) { |
|||
fprintf(fp,",\n"); |
|||
} else { |
|||
fprintf(fp,"\n"); |
|||
} |
|||
} |
|||
if (outer != 63) { |
|||
fprintf(fp,"},\n"); |
|||
} else { |
|||
fprintf(fp,"}\n"); |
|||
} |
|||
} |
|||
fprintf(fp,"};\n"); |
|||
secp256k1_ecmult_gen_context_clear(&ctx); |
|||
|
|||
fprintf(fp, "#undef SC\n"); |
|||
fprintf(fp, "#endif\n"); |
|||
fclose(fp); |
|||
|
|||
return 0; |
|||
} |
@ -0,0 +1,141 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_GROUP_ |
|||
#define _SECP256K1_GROUP_ |
|||
|
|||
#include "num.h" |
|||
#include "field.h" |
|||
|
|||
/** A group element of the secp256k1 curve, in affine coordinates. */ |
|||
typedef struct { |
|||
secp256k1_fe_t x; |
|||
secp256k1_fe_t y; |
|||
int infinity; /* whether this represents the point at infinity */ |
|||
} secp256k1_ge_t; |
|||
|
|||
#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} |
|||
#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} |
|||
|
|||
/** A group element of the secp256k1 curve, in jacobian coordinates. */ |
|||
typedef struct { |
|||
secp256k1_fe_t x; /* actual X: x/z^2 */ |
|||
secp256k1_fe_t y; /* actual Y: y/z^3 */ |
|||
secp256k1_fe_t z; |
|||
int infinity; /* whether this represents the point at infinity */ |
|||
} secp256k1_gej_t; |
|||
|
|||
#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} |
|||
#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} |
|||
|
|||
typedef struct { |
|||
secp256k1_fe_storage_t x; |
|||
secp256k1_fe_storage_t y; |
|||
} secp256k1_ge_storage_t; |
|||
|
|||
#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} |
|||
|
|||
#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) |
|||
|
|||
/** Set a group element equal to the point at infinity */ |
|||
static void secp256k1_ge_set_infinity(secp256k1_ge_t *r); |
|||
|
|||
/** Set a group element equal to the point with given X and Y coordinates */ |
|||
static void secp256k1_ge_set_xy(secp256k1_ge_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y); |
|||
|
|||
/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
|||
* for Y. Return value indicates whether the result is valid. */ |
|||
static int secp256k1_ge_set_xo_var(secp256k1_ge_t *r, const secp256k1_fe_t *x, int odd); |
|||
|
|||
/** Check whether a group element is the point at infinity. */ |
|||
static int secp256k1_ge_is_infinity(const secp256k1_ge_t *a); |
|||
|
|||
/** Check whether a group element is valid (i.e., on the curve). */ |
|||
static int secp256k1_ge_is_valid_var(const secp256k1_ge_t *a); |
|||
|
|||
static void secp256k1_ge_neg(secp256k1_ge_t *r, const secp256k1_ge_t *a); |
|||
|
|||
/** Set a group element equal to another which is given in jacobian coordinates */ |
|||
static void secp256k1_ge_set_gej(secp256k1_ge_t *r, secp256k1_gej_t *a); |
|||
|
|||
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */ |
|||
static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const callback_t *cb); |
|||
|
|||
/** Set a batch of group elements equal to the inputs given in jacobian
|||
* coordinates (with known z-ratios). zr must contain the known z-ratios such |
|||
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */ |
|||
static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zr); |
|||
|
|||
/** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
|||
* the same global z "denominator". zr must contain the known z-ratios such |
|||
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y |
|||
* coordinates of the result are stored in r, the common z coordinate is |
|||
* stored in globalz. */ |
|||
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge_t *r, secp256k1_fe_t *globalz, const secp256k1_gej_t *a, const secp256k1_fe_t *zr); |
|||
|
|||
/** Set a group element (jacobian) equal to the point at infinity. */ |
|||
static void secp256k1_gej_set_infinity(secp256k1_gej_t *r); |
|||
|
|||
/** Set a group element (jacobian) equal to the point with given X and Y coordinates. */ |
|||
static void secp256k1_gej_set_xy(secp256k1_gej_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y); |
|||
|
|||
/** Set a group element (jacobian) equal to another which is given in affine coordinates. */ |
|||
static void secp256k1_gej_set_ge(secp256k1_gej_t *r, const secp256k1_ge_t *a); |
|||
|
|||
/** Compare the X coordinate of a group element (jacobian). */ |
|||
static int secp256k1_gej_eq_x_var(const secp256k1_fe_t *x, const secp256k1_gej_t *a); |
|||
|
|||
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ |
|||
static void secp256k1_gej_neg(secp256k1_gej_t *r, const secp256k1_gej_t *a); |
|||
|
|||
/** Check whether a group element is the point at infinity. */ |
|||
static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a); |
|||
|
|||
/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
|||
* a may not be zero. Constant time. */ |
|||
static void secp256k1_gej_double_nonzero(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr); |
|||
|
|||
/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */ |
|||
static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr); |
|||
|
|||
/** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ |
|||
static void secp256k1_gej_add_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_t *b, secp256k1_fe_t *rzr); |
|||
|
|||
/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ |
|||
static void secp256k1_gej_add_ge(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b); |
|||
|
|||
/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
|||
than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time |
|||
guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ |
|||
static void secp256k1_gej_add_ge_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, secp256k1_fe_t *rzr); |
|||
|
|||
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */ |
|||
static void secp256k1_gej_add_zinv_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, const secp256k1_fe_t *bzinv); |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ |
|||
static void secp256k1_ge_mul_lambda(secp256k1_ge_t *r, const secp256k1_ge_t *a); |
|||
#endif |
|||
|
|||
/** Clear a secp256k1_gej_t to prevent leaking sensitive information. */ |
|||
static void secp256k1_gej_clear(secp256k1_gej_t *r); |
|||
|
|||
/** Clear a secp256k1_ge_t to prevent leaking sensitive information. */ |
|||
static void secp256k1_ge_clear(secp256k1_ge_t *r); |
|||
|
|||
/** Convert a group element to the storage type. */ |
|||
static void secp256k1_ge_to_storage(secp256k1_ge_storage_t *r, const secp256k1_ge_t*); |
|||
|
|||
/** Convert a group element back from the storage type. */ |
|||
static void secp256k1_ge_from_storage(secp256k1_ge_t *r, const secp256k1_ge_storage_t*); |
|||
|
|||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */ |
|||
static void secp256k1_ge_storage_cmov(secp256k1_ge_storage_t *r, const secp256k1_ge_storage_t *a, int flag); |
|||
|
|||
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */ |
|||
static void secp256k1_gej_rescale(secp256k1_gej_t *r, const secp256k1_fe_t *b); |
|||
|
|||
#endif |
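/* Illustrative usage sketch (hypothetical helper, not library code): compute
 * 2*P for an affine point P using the jacobian API declared above, then map
 * the result back to affine coordinates. */
static void secp256k1_ge_sketch_double_affine(secp256k1_ge_t *r, const secp256k1_ge_t *p) {
    secp256k1_gej_t pj, p2j;
    secp256k1_gej_set_ge(&pj, p);              /* lift P to jacobian (z = 1) */
    secp256k1_gej_double_var(&p2j, &pj, NULL); /* 2*P */
    secp256k1_ge_set_gej(r, &p2j);             /* back to affine */
}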
@ -0,0 +1,634 @@ |
|||
/**********************************************************************
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_GROUP_IMPL_H_ |
|||
#define _SECP256K1_GROUP_IMPL_H_ |
|||
|
|||
#include <string.h> |
|||
|
|||
#include "num.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
|
|||
/** Generator for secp256k1, value 'g' defined in
|||
* "Standards for Efficient Cryptography" (SEC2) 2.7.1. |
|||
*/ |
|||
static const secp256k1_ge_t secp256k1_ge_const_g = SECP256K1_GE_CONST( |
|||
0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, |
|||
0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, |
|||
0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, |
|||
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL |
|||
); |
|||
|
|||
static void secp256k1_ge_set_gej_zinv(secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zi) { |
|||
secp256k1_fe_t zi2; |
|||
secp256k1_fe_t zi3; |
|||
secp256k1_fe_sqr(&zi2, zi); |
|||
secp256k1_fe_mul(&zi3, &zi2, zi); |
|||
secp256k1_fe_mul(&r->x, &a->x, &zi2); |
|||
secp256k1_fe_mul(&r->y, &a->y, &zi3); |
|||
r->infinity = a->infinity; |
|||
} |
|||
|
|||
static void secp256k1_ge_set_infinity(secp256k1_ge_t *r) { |
|||
r->infinity = 1; |
|||
} |
|||
|
|||
static void secp256k1_ge_set_xy(secp256k1_ge_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y) { |
|||
r->infinity = 0; |
|||
r->x = *x; |
|||
r->y = *y; |
|||
} |
|||
|
|||
static int secp256k1_ge_is_infinity(const secp256k1_ge_t *a) { |
|||
return a->infinity; |
|||
} |
|||
|
|||
static void secp256k1_ge_neg(secp256k1_ge_t *r, const secp256k1_ge_t *a) { |
|||
*r = *a; |
|||
secp256k1_fe_normalize_weak(&r->y); |
|||
secp256k1_fe_negate(&r->y, &r->y, 1); |
|||
} |
|||
|
|||
static void secp256k1_ge_set_gej(secp256k1_ge_t *r, secp256k1_gej_t *a) { |
|||
secp256k1_fe_t z2, z3; |
|||
r->infinity = a->infinity; |
|||
secp256k1_fe_inv(&a->z, &a->z); |
|||
secp256k1_fe_sqr(&z2, &a->z); |
|||
secp256k1_fe_mul(&z3, &a->z, &z2); |
|||
secp256k1_fe_mul(&a->x, &a->x, &z2); |
|||
secp256k1_fe_mul(&a->y, &a->y, &z3); |
|||
secp256k1_fe_set_int(&a->z, 1); |
|||
r->x = a->x; |
|||
r->y = a->y; |
|||
} |
|||
|
|||
static void secp256k1_ge_set_gej_var(secp256k1_ge_t *r, secp256k1_gej_t *a) { |
|||
secp256k1_fe_t z2, z3; |
|||
r->infinity = a->infinity; |
|||
if (a->infinity) { |
|||
return; |
|||
} |
|||
secp256k1_fe_inv_var(&a->z, &a->z); |
|||
secp256k1_fe_sqr(&z2, &a->z); |
|||
secp256k1_fe_mul(&z3, &a->z, &z2); |
|||
secp256k1_fe_mul(&a->x, &a->x, &z2); |
|||
secp256k1_fe_mul(&a->y, &a->y, &z3); |
|||
secp256k1_fe_set_int(&a->z, 1); |
|||
r->x = a->x; |
|||
r->y = a->y; |
|||
} |
|||
|
|||
static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const callback_t *cb) { |
|||
secp256k1_fe_t *az; |
|||
secp256k1_fe_t *azi; |
|||
size_t i; |
|||
size_t count = 0; |
|||
az = (secp256k1_fe_t *)checked_malloc(cb, sizeof(secp256k1_fe_t) * len); |
|||
for (i = 0; i < len; i++) { |
|||
if (!a[i].infinity) { |
|||
az[count++] = a[i].z; |
|||
} |
|||
} |
|||
|
|||
azi = (secp256k1_fe_t *)checked_malloc(cb, sizeof(secp256k1_fe_t) * count); |
|||
secp256k1_fe_inv_all_var(count, azi, az); |
|||
free(az); |
|||
|
|||
count = 0; |
|||
for (i = 0; i < len; i++) { |
|||
r[i].infinity = a[i].infinity; |
|||
if (!a[i].infinity) { |
|||
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]); |
|||
} |
|||
} |
|||
free(azi); |
|||
} |
|||
|
|||
static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zr) { |
|||
size_t i = len - 1; |
|||
secp256k1_fe_t zi; |
|||
|
|||
if (len < 1) |
|||
return; |
|||
|
|||
/* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */ |
|||
secp256k1_fe_inv(&zi, &a[i].z); |
|||
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); |
|||
|
|||
/* Work our way backwards, using the z-ratios to scale the x/y values. */ |
|||
while (i > 0) { |
|||
secp256k1_fe_mul(&zi, &zi, &zr[i]); |
|||
i--; |
|||
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge_t *r, secp256k1_fe_t *globalz, const secp256k1_gej_t *a, const secp256k1_fe_t *zr) { |
|||
size_t i = len - 1; |
|||
secp256k1_fe_t zs; |
|||
|
|||
if (len < 1) |
|||
return; |
|||
|
|||
/* The z of the final point gives us the "global Z" for the table. */ |
|||
r[i].x = a[i].x; |
|||
r[i].y = a[i].y; |
|||
*globalz = a[i].z; |
|||
r[i].infinity = 0; |
|||
zs = zr[i]; |
|||
|
|||
/* Work our way backwards, using the z-ratios to scale the x/y values. */ |
|||
while (i > 0) { |
|||
if (i != len - 1) { |
|||
secp256k1_fe_mul(&zs, &zs, &zr[i]); |
|||
} |
|||
i--; |
|||
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs); |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_gej_set_infinity(secp256k1_gej_t *r) { |
|||
r->infinity = 1; |
|||
secp256k1_fe_set_int(&r->x, 0); |
|||
secp256k1_fe_set_int(&r->y, 0); |
|||
secp256k1_fe_set_int(&r->z, 0); |
|||
} |
|||
|
|||
static void secp256k1_gej_set_xy(secp256k1_gej_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y) { |
|||
r->infinity = 0; |
|||
r->x = *x; |
|||
r->y = *y; |
|||
secp256k1_fe_set_int(&r->z, 1); |
|||
} |
|||
|
|||
static void secp256k1_gej_clear(secp256k1_gej_t *r) { |
|||
r->infinity = 0; |
|||
secp256k1_fe_clear(&r->x); |
|||
secp256k1_fe_clear(&r->y); |
|||
secp256k1_fe_clear(&r->z); |
|||
} |
|||
|
|||
static void secp256k1_ge_clear(secp256k1_ge_t *r) { |
|||
r->infinity = 0; |
|||
secp256k1_fe_clear(&r->x); |
|||
secp256k1_fe_clear(&r->y); |
|||
} |
|||
|
|||
static int secp256k1_ge_set_xo_var(secp256k1_ge_t *r, const secp256k1_fe_t *x, int odd) { |
|||
secp256k1_fe_t x2, x3, c; |
|||
r->x = *x; |
|||
secp256k1_fe_sqr(&x2, x); |
|||
secp256k1_fe_mul(&x3, x, &x2); |
|||
r->infinity = 0; |
|||
secp256k1_fe_set_int(&c, 7); |
|||
secp256k1_fe_add(&c, &x3); |
|||
if (!secp256k1_fe_sqrt_var(&r->y, &c)) { |
|||
return 0; |
|||
} |
|||
secp256k1_fe_normalize_var(&r->y); |
|||
if (secp256k1_fe_is_odd(&r->y) != odd) { |
|||
secp256k1_fe_negate(&r->y, &r->y, 1); |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
static void secp256k1_gej_set_ge(secp256k1_gej_t *r, const secp256k1_ge_t *a) { |
|||
r->infinity = a->infinity; |
|||
r->x = a->x; |
|||
r->y = a->y; |
|||
secp256k1_fe_set_int(&r->z, 1); |
|||
} |
|||
|
|||
static int secp256k1_gej_eq_x_var(const secp256k1_fe_t *x, const secp256k1_gej_t *a) { |
|||
secp256k1_fe_t r, r2; |
|||
VERIFY_CHECK(!a->infinity); |
|||
secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x); |
|||
r2 = a->x; secp256k1_fe_normalize_weak(&r2); |
|||
return secp256k1_fe_equal_var(&r, &r2); |
|||
} |
|||
|
|||
static void secp256k1_gej_neg(secp256k1_gej_t *r, const secp256k1_gej_t *a) { |
|||
r->infinity = a->infinity; |
|||
r->x = a->x; |
|||
r->y = a->y; |
|||
r->z = a->z; |
|||
secp256k1_fe_normalize_weak(&r->y); |
|||
secp256k1_fe_negate(&r->y, &r->y, 1); |
|||
} |
|||
|
|||
static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a) { |
|||
return a->infinity; |
|||
} |
|||
|
|||
static int secp256k1_gej_is_valid_var(const secp256k1_gej_t *a) { |
|||
secp256k1_fe_t y2, x3, z2, z6; |
|||
if (a->infinity) { |
|||
return 0; |
|||
} |
|||
/** y^2 = x^3 + 7
|||
* (Y/Z^3)^2 = (X/Z^2)^3 + 7 |
|||
* Y^2 / Z^6 = X^3 / Z^6 + 7 |
|||
* Y^2 = X^3 + 7*Z^6 |
|||
*/ |
|||
secp256k1_fe_sqr(&y2, &a->y); |
|||
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); |
|||
secp256k1_fe_sqr(&z2, &a->z); |
|||
secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); |
|||
secp256k1_fe_mul_int(&z6, 7); |
|||
secp256k1_fe_add(&x3, &z6); |
|||
secp256k1_fe_normalize_weak(&x3); |
|||
return secp256k1_fe_equal_var(&y2, &x3); |
|||
} |
|||
|
|||
static int secp256k1_ge_is_valid_var(const secp256k1_ge_t *a) { |
|||
secp256k1_fe_t y2, x3, c; |
|||
if (a->infinity) { |
|||
return 0; |
|||
} |
|||
/* y^2 = x^3 + 7 */ |
|||
secp256k1_fe_sqr(&y2, &a->y); |
|||
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); |
|||
secp256k1_fe_set_int(&c, 7); |
|||
secp256k1_fe_add(&x3, &c); |
|||
secp256k1_fe_normalize_weak(&x3); |
|||
return secp256k1_fe_equal_var(&y2, &x3); |
|||
} |
|||
|
|||
static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr) { |
|||
/* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate */ |
|||
secp256k1_fe_t t1,t2,t3,t4; |
|||
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
|||
* Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have |
|||
* y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. |
|||
*/ |
|||
r->infinity = a->infinity; |
|||
if (r->infinity) { |
|||
if (rzr) { |
|||
secp256k1_fe_set_int(rzr, 1); |
|||
} |
|||
return; |
|||
} |
|||
|
|||
if (rzr) { |
|||
*rzr = a->y; |
|||
secp256k1_fe_normalize_weak(rzr); |
|||
secp256k1_fe_mul_int(rzr, 2); |
|||
} |
|||
|
|||
secp256k1_fe_mul(&r->z, &a->z, &a->y); |
|||
secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ |
|||
secp256k1_fe_sqr(&t1, &a->x); |
|||
secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ |
|||
secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ |
|||
secp256k1_fe_sqr(&t3, &a->y); |
|||
secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ |
|||
secp256k1_fe_sqr(&t4, &t3); |
|||
secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ |
|||
secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ |
|||
r->x = t3; |
|||
secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ |
|||
secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ |
|||
secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ |
|||
secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ |
|||
secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ |
|||
secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ |
|||
secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ |
|||
secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ |
|||
secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ |
|||
} |
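Collecting the step-by-step comments above, the doubling computed here takes the Jacobian point (X, Y, Z) to

$$X' = 9X^4 - 8XY^2, \qquad Y' = 36X^3Y^2 - 27X^6 - 8Y^4, \qquad Z' = 2YZ,$$

which are the standard short-Weierstrass doubling formulas for a curve with a = 0, written without any field inversion.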
|||
|
|||
static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr) { |
|||
VERIFY_CHECK(!secp256k1_gej_is_infinity(a)); |
|||
secp256k1_gej_double_var(r, a, rzr); |
|||
} |
|||
|
|||
static void secp256k1_gej_add_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_t *b, secp256k1_fe_t *rzr) { |
|||
/* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ |
|||
secp256k1_fe_t z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; |
|||
|
|||
if (a->infinity) { |
|||
VERIFY_CHECK(rzr == NULL); |
|||
*r = *b; |
|||
return; |
|||
} |
|||
|
|||
if (b->infinity) { |
|||
if (rzr) { |
|||
secp256k1_fe_set_int(rzr, 1); |
|||
} |
|||
*r = *a; |
|||
return; |
|||
} |
|||
|
|||
r->infinity = 0; |
|||
secp256k1_fe_sqr(&z22, &b->z); |
|||
secp256k1_fe_sqr(&z12, &a->z); |
|||
secp256k1_fe_mul(&u1, &a->x, &z22); |
|||
secp256k1_fe_mul(&u2, &b->x, &z12); |
|||
secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z); |
|||
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); |
|||
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); |
|||
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); |
|||
if (secp256k1_fe_normalizes_to_zero_var(&h)) { |
|||
if (secp256k1_fe_normalizes_to_zero_var(&i)) { |
|||
secp256k1_gej_double_var(r, a, rzr); |
|||
} else { |
|||
if (rzr) { |
|||
secp256k1_fe_set_int(rzr, 0); |
|||
} |
|||
r->infinity = 1; |
|||
} |
|||
return; |
|||
} |
|||
secp256k1_fe_sqr(&i2, &i); |
|||
secp256k1_fe_sqr(&h2, &h); |
|||
secp256k1_fe_mul(&h3, &h, &h2); |
|||
secp256k1_fe_mul(&h, &h, &b->z); |
|||
if (rzr) { |
|||
*rzr = h; |
|||
} |
|||
secp256k1_fe_mul(&r->z, &a->z, &h); |
|||
secp256k1_fe_mul(&t, &u1, &h2); |
|||
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); |
|||
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); |
|||
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); |
|||
secp256k1_fe_add(&r->y, &h3); |
|||
} |
|||
|
|||
static void secp256k1_gej_add_ge_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, secp256k1_fe_t *rzr) { |
|||
/* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ |
|||
secp256k1_fe_t z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; |
|||
if (a->infinity) { |
|||
VERIFY_CHECK(rzr == NULL); |
|||
secp256k1_gej_set_ge(r, b); |
|||
return; |
|||
} |
|||
if (b->infinity) { |
|||
if (rzr) { |
|||
secp256k1_fe_set_int(rzr, 1); |
|||
} |
|||
*r = *a; |
|||
return; |
|||
} |
|||
r->infinity = 0; |
|||
|
|||
secp256k1_fe_sqr(&z12, &a->z); |
|||
u1 = a->x; secp256k1_fe_normalize_weak(&u1); |
|||
secp256k1_fe_mul(&u2, &b->x, &z12); |
|||
s1 = a->y; secp256k1_fe_normalize_weak(&s1); |
|||
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); |
|||
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); |
|||
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); |
|||
if (secp256k1_fe_normalizes_to_zero_var(&h)) { |
|||
if (secp256k1_fe_normalizes_to_zero_var(&i)) { |
|||
secp256k1_gej_double_var(r, a, rzr); |
|||
} else { |
|||
if (rzr) { |
|||
secp256k1_fe_set_int(rzr, 0); |
|||
} |
|||
r->infinity = 1; |
|||
} |
|||
return; |
|||
} |
|||
secp256k1_fe_sqr(&i2, &i); |
|||
secp256k1_fe_sqr(&h2, &h); |
|||
secp256k1_fe_mul(&h3, &h, &h2); |
|||
if (rzr) { |
|||
*rzr = h; |
|||
} |
|||
secp256k1_fe_mul(&r->z, &a->z, &h); |
|||
secp256k1_fe_mul(&t, &u1, &h2); |
|||
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); |
|||
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); |
|||
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); |
|||
secp256k1_fe_add(&r->y, &h3); |
|||
} |
|||
|
|||
static void secp256k1_gej_add_zinv_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, const secp256k1_fe_t *bzinv) { |
|||
/* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ |
|||
secp256k1_fe_t az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; |
|||
|
|||
if (b->infinity) { |
|||
*r = *a; |
|||
return; |
|||
} |
|||
if (a->infinity) { |
|||
secp256k1_fe_t bzinv2, bzinv3; |
|||
r->infinity = b->infinity; |
|||
secp256k1_fe_sqr(&bzinv2, bzinv); |
|||
secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv); |
|||
secp256k1_fe_mul(&r->x, &b->x, &bzinv2); |
|||
secp256k1_fe_mul(&r->y, &b->y, &bzinv3); |
|||
secp256k1_fe_set_int(&r->z, 1); |
|||
return; |
|||
} |
|||
r->infinity = 0; |
|||
|
|||
/** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
|
|||
* secp256k1's isomorphism we can multiply the Z coordinates on both sides |
|||
* by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1). |
|||
* This means that (rx,ry,rz) can be calculated as |
|||
* (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz. |
|||
* The variable az below holds the modified Z coordinate for a, which is used |
|||
* for the computation of rx and ry, but not for rz. |
|||
*/ |
|||
secp256k1_fe_mul(&az, &a->z, bzinv); |
|||
|
|||
secp256k1_fe_sqr(&z12, &az); |
|||
u1 = a->x; secp256k1_fe_normalize_weak(&u1); |
|||
secp256k1_fe_mul(&u2, &b->x, &z12); |
|||
s1 = a->y; secp256k1_fe_normalize_weak(&s1); |
|||
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az); |
|||
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); |
|||
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); |
|||
if (secp256k1_fe_normalizes_to_zero_var(&h)) { |
|||
if (secp256k1_fe_normalizes_to_zero_var(&i)) { |
|||
secp256k1_gej_double_var(r, a, NULL); |
|||
} else { |
|||
r->infinity = 1; |
|||
} |
|||
return; |
|||
} |
|||
secp256k1_fe_sqr(&i2, &i); |
|||
secp256k1_fe_sqr(&h2, &h); |
|||
secp256k1_fe_mul(&h3, &h, &h2); |
|||
r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h); |
|||
secp256k1_fe_mul(&t, &u1, &h2); |
|||
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); |
|||
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); |
|||
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); |
|||
secp256k1_fe_add(&r->y, &h3); |
|||
} |
|||
|
|||
|
|||
static void secp256k1_gej_add_ge(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b) { |
|||
/* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ |
|||
static const secp256k1_fe_t fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); |
|||
secp256k1_fe_t zz, u1, u2, s1, s2, t, tt, m, n, q, rr; |
|||
secp256k1_fe_t m_alt, rr_alt; |
|||
int infinity, degenerate; |
|||
VERIFY_CHECK(!b->infinity); |
|||
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); |
|||
|
|||
/** In:
|
|||
* Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. |
|||
* In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. |
|||
* we find as solution for a unified addition/doubling formula: |
|||
* lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. |
|||
* x3 = lambda^2 - (x1 + x2) |
|||
* 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). |
|||
* |
|||
* Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: |
|||
* U1 = X1*Z2^2, U2 = X2*Z1^2 |
|||
* S1 = Y1*Z2^3, S2 = Y2*Z1^3 |
|||
* Z = Z1*Z2 |
|||
* T = U1+U2 |
|||
* M = S1+S2 |
|||
* Q = T*M^2 |
|||
* R = T^2-U1*U2 |
|||
* X3 = 4*(R^2-Q) |
|||
* Y3 = 4*(R*(3*Q-2*R^2)-M^4) |
|||
* Z3 = 2*M*Z |
|||
* (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) |
|||
* |
|||
* This formula has the benefit of being the same for both addition |
|||
* of distinct points and doubling. However, it breaks down in the |
|||
* case that either point is infinity, or that y1 = -y2. We handle |
|||
* these cases in the following ways: |
|||
* |
|||
* - If b is infinity we simply bail by means of a VERIFY_CHECK. |
|||
* |
|||
* - If a is infinity, we detect this, and at the end of the |
|||
* computation replace the result (which will be meaningless, |
|||
* but we compute to be constant-time) with b.x : b.y : 1. |
|||
* |
|||
* - If a = -b, we have y1 = -y2, which is a degenerate case. |
|||
* But here the answer is infinity, so we simply set the |
|||
* infinity flag of the result, overriding the computed values |
|||
* without even needing to cmov. |
|||
* |
|||
* - If y1 = -y2 but x1 != x2, which does occur thanks to certain |
|||
* properties of our curve (specifically, 1 has nontrivial cube |
|||
* roots in our field, and the curve equation has no x coefficient) |
|||
* then the answer is not infinity but also not given by the above |
|||
* equation. In this case, we cmov in place an alternate expression |
|||
* for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these |
|||
* expressions for lambda are defined, they are equal, and can be |
|||
* obtained from each other by multiplication by (y1 + y2)/(y1 + y2) |
|||
* then substitution of x^3 + 7 for y^2 (using the curve equation). |
|||
* For all pairs of nonzero points (a, b) at least one is defined, |
|||
* so this covers everything. |
|||
*/ |
|||
|
|||
secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ |
|||
u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ |
|||
secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ |
|||
s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ |
|||
secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ |
|||
secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ |
|||
t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ |
|||
m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ |
|||
secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ |
|||
secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ |
|||
secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ |
|||
secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ |
|||
/** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
|
|||
* case that Z = z1z2 = 0, and this is special-cased later on). */ |
|||
degenerate = secp256k1_fe_normalizes_to_zero(&m) & |
|||
secp256k1_fe_normalizes_to_zero(&rr); |
|||
/* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
|
|||
* This means either x1 == beta*x2 or beta*x1 == x2, where beta is |
|||
* a nontrivial cube root of one. In either case, an alternate |
|||
* non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), |
|||
* so we set R/M equal to this. */ |
|||
rr_alt = s1; |
|||
secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ |
|||
secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ |
|||
|
|||
secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); |
|||
secp256k1_fe_cmov(&m_alt, &m, !degenerate); |
|||
/* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
|
|||
* From here on out Ralt and Malt represent the numerator |
|||
* and denominator of lambda; R and M represent the explicit |
|||
* expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ |
|||
secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ |
|||
secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ |
|||
/* These two lines use the observation that either M == Malt or M == 0,
|
|||
* so M^3 * Malt is either Malt^4 (which is computed by squaring), or |
|||
* zero (which is "computed" by cmov). So the cost is one squaring |
|||
* versus two multiplications. */ |
|||
secp256k1_fe_sqr(&n, &n); |
|||
secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ |
|||
secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ |
|||
secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ |
|||
infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); |
|||
secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ |
|||
secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ |
|||
secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ |
|||
secp256k1_fe_normalize_weak(&t); |
|||
r->x = t; /* r->x = Ralt^2-Q (1) */ |
|||
secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ |
|||
secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ |
|||
secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ |
|||
secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ |
|||
secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ |
|||
secp256k1_fe_normalize_weak(&r->y); |
|||
secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ |
|||
secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ |
|||
|
|||
/** In case a->infinity == 1, replace r with (b->x, b->y, 1). */ |
|||
secp256k1_fe_cmov(&r->x, &b->x, a->infinity); |
|||
secp256k1_fe_cmov(&r->y, &b->y, a->infinity); |
|||
secp256k1_fe_cmov(&r->z, &fe_1, a->infinity); |
|||
r->infinity = infinity; |
|||
} |
|||
|
|||
static void secp256k1_gej_rescale(secp256k1_gej_t *r, const secp256k1_fe_t *s) { |
|||
/* Operations: 4 mul, 1 sqr */ |
|||
secp256k1_fe_t zz; |
|||
VERIFY_CHECK(!secp256k1_fe_is_zero(s)); |
|||
secp256k1_fe_sqr(&zz, s); |
|||
secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ |
|||
secp256k1_fe_mul(&r->y, &r->y, &zz); |
|||
secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ |
|||
secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ |
|||
} |
|||
|
|||
static void secp256k1_ge_to_storage(secp256k1_ge_storage_t *r, const secp256k1_ge_t *a) { |
|||
secp256k1_fe_t x, y; |
|||
VERIFY_CHECK(!a->infinity); |
|||
x = a->x; |
|||
secp256k1_fe_normalize(&x); |
|||
y = a->y; |
|||
secp256k1_fe_normalize(&y); |
|||
secp256k1_fe_to_storage(&r->x, &x); |
|||
secp256k1_fe_to_storage(&r->y, &y); |
|||
} |
|||
|
|||
static void secp256k1_ge_from_storage(secp256k1_ge_t *r, const secp256k1_ge_storage_t *a) { |
|||
secp256k1_fe_from_storage(&r->x, &a->x); |
|||
secp256k1_fe_from_storage(&r->y, &a->y); |
|||
r->infinity = 0; |
|||
} |
|||
|
|||
static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage_t *r, const secp256k1_ge_storage_t *a, int flag) { |
|||
secp256k1_fe_storage_cmov(&r->x, &a->x, flag); |
|||
secp256k1_fe_storage_cmov(&r->y, &a->y, flag); |
|||
} |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
static void secp256k1_ge_mul_lambda(secp256k1_ge_t *r, const secp256k1_ge_t *a) { |
|||
static const secp256k1_fe_t beta = SECP256K1_FE_CONST( |
|||
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, |
|||
0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul |
|||
); |
|||
*r = *a; |
|||
secp256k1_fe_mul(&r->x, &r->x, &beta); |
|||
} |
|||
#endif |
|||
|
|||
#endif |
@ -0,0 +1,41 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_HASH_ |
|||
#define _SECP256K1_HASH_ |
|||
|
|||
#include <stdlib.h> |
|||
#include <stdint.h> |
|||
|
|||
typedef struct { |
|||
uint32_t s[32]; |
|||
uint32_t buf[16]; /* In big endian */ |
|||
size_t bytes; |
|||
} secp256k1_sha256_t; |
|||
|
|||
static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash); |
|||
static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t size); |
|||
static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32); |
|||
|
|||
typedef struct { |
|||
secp256k1_sha256_t inner, outer; |
|||
} secp256k1_hmac_sha256_t; |
|||
|
|||
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t size); |
|||
static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size); |
|||
static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32); |
|||
|
|||
typedef struct { |
|||
unsigned char v[32]; |
|||
unsigned char k[32]; |
|||
int retry; |
|||
} secp256k1_rfc6979_hmac_sha256_t; |
|||
|
|||
static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen); |
|||
static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen); |
|||
static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng); |
|||
|
|||
#endif |
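The intended call pattern for all three interfaces declared above is initialize, any number of writes, then finalize. A hedged sketch follows; since the functions are static it would have to live in a translation unit that includes hash_impl.h, and the message, key, and function name are made up for illustration.

static void example_hash_usage(void) {
    static const unsigned char msg[] = "example message";
    static const unsigned char key[] = "example key";
    unsigned char digest[32], mac[32], nonce[32];
    secp256k1_sha256_t sha;
    secp256k1_hmac_sha256_t hmac;
    secp256k1_rfc6979_hmac_sha256_t rng;

    /* Plain SHA-256 of msg. */
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, msg, sizeof(msg) - 1);
    secp256k1_sha256_finalize(&sha, digest);

    /* HMAC-SHA256 of msg under key. */
    secp256k1_hmac_sha256_initialize(&hmac, key, sizeof(key) - 1);
    secp256k1_hmac_sha256_write(&hmac, msg, sizeof(msg) - 1);
    secp256k1_hmac_sha256_finalize(&hmac, mac);

    /* RFC6979 HMAC-SHA256 DRBG: seed once, draw 32-byte candidates as
     * needed, then wipe the state. */
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, key, sizeof(key) - 1);
    secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce, 32);
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);

    (void)digest; (void)mac; (void)nonce;
}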
@ -0,0 +1,283 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_HASH_IMPL_H_ |
|||
#define _SECP256K1_HASH_IMPL_H_ |
|||
|
|||
#include "hash.h" |
|||
|
|||
#include <stdlib.h> |
|||
#include <stdint.h> |
|||
#include <string.h> |
|||
|
|||
#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) |
|||
#define Maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y)))) |
|||
#define Sigma0(x) (((x) >> 2 | (x) << 30) ^ ((x) >> 13 | (x) << 19) ^ ((x) >> 22 | (x) << 10)) |
|||
#define Sigma1(x) (((x) >> 6 | (x) << 26) ^ ((x) >> 11 | (x) << 21) ^ ((x) >> 25 | (x) << 7)) |
|||
#define sigma0(x) (((x) >> 7 | (x) << 25) ^ ((x) >> 18 | (x) << 14) ^ ((x) >> 3)) |
|||
#define sigma1(x) (((x) >> 17 | (x) << 15) ^ ((x) >> 19 | (x) << 13) ^ ((x) >> 10)) |
|||
|
|||
#define Round(a,b,c,d,e,f,g,h,k,w) do { \ |
|||
uint32_t t1 = (h) + Sigma1(e) + Ch((e), (f), (g)) + (k) + (w); \ |
|||
uint32_t t2 = Sigma0(a) + Maj((a), (b), (c)); \ |
|||
(d) += t1; \ |
|||
(h) = t1 + t2; \ |
|||
} while(0) |
|||
|
|||
#ifdef WORDS_BIGENDIAN |
|||
#define BE32(x) (x) |
|||
#else |
|||
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24)) |
|||
#endif |
|||
|
|||
static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash) { |
|||
hash->s[0] = 0x6a09e667ul; |
|||
hash->s[1] = 0xbb67ae85ul; |
|||
hash->s[2] = 0x3c6ef372ul; |
|||
hash->s[3] = 0xa54ff53aul; |
|||
hash->s[4] = 0x510e527ful; |
|||
hash->s[5] = 0x9b05688cul; |
|||
hash->s[6] = 0x1f83d9abul; |
|||
hash->s[7] = 0x5be0cd19ul; |
|||
hash->bytes = 0; |
|||
} |
|||
|
|||
/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */ |
|||
static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) { |
|||
uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; |
|||
uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; |
|||
|
|||
Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0])); |
|||
Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1])); |
|||
Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2])); |
|||
Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3])); |
|||
Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4])); |
|||
Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5])); |
|||
Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6])); |
|||
Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7])); |
|||
Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8])); |
|||
Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9])); |
|||
Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10])); |
|||
Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11])); |
|||
Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12])); |
|||
Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13])); |
|||
Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14])); |
|||
Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15])); |
|||
|
|||
Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1)); |
|||
Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2)); |
|||
Round(g, h, a, b, c, d, e, f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3)); |
|||
Round(f, g, h, a, b, c, d, e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4)); |
|||
Round(e, f, g, h, a, b, c, d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5)); |
|||
Round(d, e, f, g, h, a, b, c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6)); |
|||
Round(c, d, e, f, g, h, a, b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7)); |
|||
Round(b, c, d, e, f, g, h, a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8)); |
|||
Round(a, b, c, d, e, f, g, h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9)); |
|||
Round(h, a, b, c, d, e, f, g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10)); |
|||
Round(g, h, a, b, c, d, e, f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11)); |
|||
Round(f, g, h, a, b, c, d, e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12)); |
|||
Round(e, f, g, h, a, b, c, d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13)); |
|||
Round(d, e, f, g, h, a, b, c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14)); |
|||
Round(c, d, e, f, g, h, a, b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15)); |
|||
Round(b, c, d, e, f, g, h, a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0)); |
|||
|
|||
Round(a, b, c, d, e, f, g, h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1)); |
|||
Round(h, a, b, c, d, e, f, g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2)); |
|||
Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3)); |
|||
Round(f, g, h, a, b, c, d, e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4)); |
|||
Round(e, f, g, h, a, b, c, d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5)); |
|||
Round(d, e, f, g, h, a, b, c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6)); |
|||
Round(c, d, e, f, g, h, a, b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7)); |
|||
Round(b, c, d, e, f, g, h, a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8)); |
|||
Round(a, b, c, d, e, f, g, h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9)); |
|||
Round(h, a, b, c, d, e, f, g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10)); |
|||
Round(g, h, a, b, c, d, e, f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11)); |
|||
Round(f, g, h, a, b, c, d, e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12)); |
|||
Round(e, f, g, h, a, b, c, d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13)); |
|||
Round(d, e, f, g, h, a, b, c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14)); |
|||
Round(c, d, e, f, g, h, a, b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15)); |
|||
Round(b, c, d, e, f, g, h, a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0)); |
|||
|
|||
Round(a, b, c, d, e, f, g, h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1)); |
|||
Round(h, a, b, c, d, e, f, g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2)); |
|||
Round(g, h, a, b, c, d, e, f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3)); |
|||
Round(f, g, h, a, b, c, d, e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4)); |
|||
Round(e, f, g, h, a, b, c, d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5)); |
|||
Round(d, e, f, g, h, a, b, c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6)); |
|||
Round(c, d, e, f, g, h, a, b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7)); |
|||
Round(b, c, d, e, f, g, h, a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8)); |
|||
Round(a, b, c, d, e, f, g, h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9)); |
|||
Round(h, a, b, c, d, e, f, g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10)); |
|||
Round(g, h, a, b, c, d, e, f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11)); |
|||
Round(f, g, h, a, b, c, d, e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12)); |
|||
Round(e, f, g, h, a, b, c, d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13)); |
|||
Round(d, e, f, g, h, a, b, c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14)); |
|||
Round(c, d, e, f, g, h, a, b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15)); |
|||
Round(b, c, d, e, f, g, h, a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0)); |
|||
|
|||
s[0] += a; |
|||
s[1] += b; |
|||
s[2] += c; |
|||
s[3] += d; |
|||
s[4] += e; |
|||
s[5] += f; |
|||
s[6] += g; |
|||
s[7] += h; |
|||
} |
|||
|
|||
static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t len) { |
|||
size_t bufsize = hash->bytes & 0x3F; |
|||
hash->bytes += len; |
|||
while (bufsize + len >= 64) { |
|||
/* Fill the buffer, and process it. */ |
|||
memcpy(((unsigned char*)hash->buf) + bufsize, data, 64 - bufsize); |
|||
data += 64 - bufsize; |
|||
len -= 64 - bufsize; |
|||
secp256k1_sha256_transform(hash->s, hash->buf); |
|||
bufsize = 0; |
|||
} |
|||
if (len) { |
|||
/* Fill the buffer with what remains. */ |
|||
memcpy(((unsigned char*)hash->buf) + bufsize, data, len); |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32) { |
|||
static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; |
|||
uint32_t sizedesc[2]; |
|||
uint32_t out[8]; |
|||
int i = 0; |
|||
sizedesc[0] = BE32(hash->bytes >> 29); |
|||
sizedesc[1] = BE32(hash->bytes << 3); |
|||
secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); |
|||
secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8); |
|||
for (i = 0; i < 8; i++) { |
|||
out[i] = BE32(hash->s[i]); |
|||
hash->s[i] = 0; |
|||
} |
|||
memcpy(out32, (const unsigned char*)out, 32); |
|||
} |
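The pad-length expression 1 + ((119 - (hash->bytes % 64)) % 64) always leaves exactly 8 bytes of room for the length trailer at the end of a 64-byte block. For example, with 55 buffered bytes it evaluates to 1 + (119 - 55) % 64 = 1, giving 55 + 1 + 8 = 64, a single block; with 56 buffered bytes it evaluates to 1 + 63 = 64, giving 56 + 64 + 8 = 128, so the padding spills into a second block. The trailer itself is the message size in bits, written big-endian as the two 32-bit sizedesc words (bytes >> 29 and bytes << 3).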
|||
|
|||
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t keylen) { |
|||
int n; |
|||
unsigned char rkey[64]; |
|||
if (keylen <= 64) { |
|||
memcpy(rkey, key, keylen); |
|||
memset(rkey + keylen, 0, 64 - keylen); |
|||
} else { |
|||
secp256k1_sha256_t sha256; |
|||
secp256k1_sha256_initialize(&sha256); |
|||
secp256k1_sha256_write(&sha256, key, keylen); |
|||
secp256k1_sha256_finalize(&sha256, rkey); |
|||
memset(rkey + 32, 0, 32); |
|||
} |
|||
|
|||
secp256k1_sha256_initialize(&hash->outer); |
|||
for (n = 0; n < 64; n++) { |
|||
rkey[n] ^= 0x5c; |
|||
} |
|||
secp256k1_sha256_write(&hash->outer, rkey, 64); |
|||
|
|||
secp256k1_sha256_initialize(&hash->inner); |
|||
for (n = 0; n < 64; n++) { |
|||
rkey[n] ^= 0x5c ^ 0x36; |
|||
} |
|||
secp256k1_sha256_write(&hash->inner, rkey, 64); |
|||
memset(rkey, 0, 64); |
|||
} |
|||
|
|||
static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size) { |
|||
secp256k1_sha256_write(&hash->inner, data, size); |
|||
} |
|||
|
|||
static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32) { |
|||
unsigned char temp[32]; |
|||
secp256k1_sha256_finalize(&hash->inner, temp); |
|||
secp256k1_sha256_write(&hash->outer, temp, 32); |
|||
memset(temp, 0, 32); |
|||
secp256k1_sha256_finalize(&hash->outer, out32); |
|||
} |
|||
|
|||
|
|||
static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen) { |
|||
secp256k1_hmac_sha256_t hmac; |
|||
static const unsigned char zero[1] = {0x00}; |
|||
static const unsigned char one[1] = {0x01}; |
|||
|
|||
memset(rng->v, 0x01, 32); /* RFC6979 3.2.b. */ |
|||
memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ |
|||
|
|||
/* RFC6979 3.2.d. */ |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, zero, 1); |
|||
secp256k1_hmac_sha256_write(&hmac, key, keylen); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->k); |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->v); |
|||
|
|||
/* RFC6979 3.2.f. */ |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, one, 1); |
|||
secp256k1_hmac_sha256_write(&hmac, key, keylen); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->k); |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->v); |
|||
rng->retry = 0; |
|||
} |
|||
|
|||
static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen) { |
|||
/* RFC6979 3.2.h. */ |
|||
static const unsigned char zero[1] = {0x00}; |
|||
if (rng->retry) { |
|||
secp256k1_hmac_sha256_t hmac; |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, zero, 1); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->k); |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->v); |
|||
} |
|||
|
|||
while (outlen > 0) { |
|||
secp256k1_hmac_sha256_t hmac; |
|||
int now = outlen; |
|||
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); |
|||
secp256k1_hmac_sha256_write(&hmac, rng->v, 32); |
|||
secp256k1_hmac_sha256_finalize(&hmac, rng->v); |
|||
if (now > 32) { |
|||
now = 32; |
|||
} |
|||
memcpy(out, rng->v, now); |
|||
out += now; |
|||
outlen -= now; |
|||
} |
|||
|
|||
rng->retry = 1; |
|||
} |
|||
|
|||
static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng) { |
|||
memset(rng->k, 0, 32); |
|||
memset(rng->v, 0, 32); |
|||
rng->retry = 0; |
|||
} |
|||
|
|||
|
|||
#undef Round |
|||
#undef sigma0 |
|||
#undef sigma1 |
|||
#undef Sigma0 |
|||
#undef Sigma1 |
|||
#undef Ch |
|||
#undef Maj |
|||
#undef BE32 |
|||
|
|||
#endif |
@ -0,0 +1,152 @@ |
|||
/* src/libsecp256k1-config.h. Generated from libsecp256k1-config.h.in by configure. */ |
|||
/* src/libsecp256k1-config.h.in. Generated from configure.ac by autoheader. */ |
|||
|
|||
#ifndef LIBSECP256K1_CONFIG_H |
|||
#define LIBSECP256K1_CONFIG_H |
|||
|
|||
/* Define if building universal (internal helper macro) */ |
|||
/* #undef AC_APPLE_UNIVERSAL_BUILD */ |
|||
|
|||
/* Define this symbol to enable the ECDH module */ |
|||
#define ENABLE_MODULE_ECDH |
|||
/* #undef ENABLE_MODULE_ECDH */ |
|||
|
|||
/* Define this symbol to enable the Pedersen / zero knowledge range proof
|
|||
module */ |
|||
#define ENABLE_MODULE_RANGEPROOF |
|||
/* #undef ENABLE_MODULE_RANGEPROOF */ |
|||
|
|||
/* Define this symbol to enable the Schnorr signature module */ |
|||
#define ENABLE_MODULE_SCHNORR |
|||
/* #undef ENABLE_MODULE_SCHNORR */ |
|||
|
|||
/* Define this symbol if OpenSSL EC functions are available */ |
|||
#define ENABLE_OPENSSL_TESTS 1 |
|||
|
|||
/* Define this symbol if __builtin_clzll is available */ |
|||
#define HAVE_BUILTIN_CLZLL 1 |
|||
|
|||
/* Define this symbol if __builtin_expect is available */ |
|||
#define HAVE_BUILTIN_EXPECT 1 |
|||
|
|||
/* Define to 1 if you have the <dlfcn.h> header file. */ |
|||
#define HAVE_DLFCN_H 1 |
|||
|
|||
/* Define to 1 if you have the <inttypes.h> header file. */ |
|||
#define HAVE_INTTYPES_H 1 |
|||
|
|||
/* Define this symbol if libcrypto is installed */ |
|||
#define HAVE_LIBCRYPTO 0 |
|||
|
|||
/* Define this symbol if libgmp is installed */ |
|||
/* #undef HAVE_LIBGMP */ |
|||
|
|||
/* Define to 1 if you have the <memory.h> header file. */ |
|||
#define HAVE_MEMORY_H 1 |
|||
|
|||
/* Define to 1 if you have the <stdint.h> header file. */ |
|||
#define HAVE_STDINT_H 1 |
|||
|
|||
/* Define to 1 if you have the <stdlib.h> header file. */ |
|||
#define HAVE_STDLIB_H 1 |
|||
|
|||
/* Define to 1 if you have the <strings.h> header file. */ |
|||
#define HAVE_STRINGS_H 1 |
|||
|
|||
/* Define to 1 if you have the <string.h> header file. */ |
|||
#define HAVE_STRING_H 1 |
|||
|
|||
/* Define to 1 if you have the <sys/stat.h> header file. */ |
|||
#define HAVE_SYS_STAT_H 1 |
|||
|
|||
/* Define to 1 if you have the <sys/types.h> header file. */ |
|||
#define HAVE_SYS_TYPES_H 1 |
|||
|
|||
/* Define to 1 if you have the <unistd.h> header file. */ |
|||
#define HAVE_UNISTD_H 1 |
|||
|
|||
/* Define to 1 if the system has the type `__int128'. */ |
|||
#define HAVE___INT128 1 |
|||
|
|||
/* Define to the sub-directory where libtool stores uninstalled libraries. */ |
|||
#define LT_OBJDIR ".libs/" |
|||
|
|||
/* Name of package */ |
|||
#define PACKAGE "libsecp256k1" |
|||
|
|||
/* Define to the address where bug reports for this package should be sent. */ |
|||
#define PACKAGE_BUGREPORT "" |
|||
|
|||
/* Define to the full name of this package. */ |
|||
#define PACKAGE_NAME "libsecp256k1" |
|||
|
|||
/* Define to the full name and version of this package. */ |
|||
#define PACKAGE_STRING "libsecp256k1 0.1" |
|||
|
|||
/* Define to the one symbol short name of this package. */ |
|||
#define PACKAGE_TARNAME "libsecp256k1" |
|||
|
|||
/* Define to the home page for this package. */ |
|||
#define PACKAGE_URL "" |
|||
|
|||
/* Define to the version of this package. */ |
|||
#define PACKAGE_VERSION "0.1" |
|||
|
|||
/* Define to 1 if you have the ANSI C header files. */ |
|||
#define STDC_HEADERS 1 |
|||
|
|||
/* Define this symbol to enable x86_64 assembly optimizations */ |
|||
#define USE_ASM_X86_64 1 |
|||
|
|||
/* Define this symbol to use a statically generated ecmult table */ |
|||
#define USE_ECMULT_STATIC_PRECOMPUTATION 1 |
|||
|
|||
/* Define this symbol to use endomorphism optimization */ |
|||
/* #undef USE_ENDOMORPHISM */ |
|||
|
|||
/* Define this symbol to use the FIELD_10X26 implementation */ |
|||
/* #undef USE_FIELD_10X26 */ |
|||
|
|||
/* Define this symbol to use the FIELD_5X52 implementation */ |
|||
#define USE_FIELD_5X52 1 |
|||
|
|||
/* Define this symbol to use the native field inverse implementation */ |
|||
#define USE_FIELD_INV_BUILTIN 1 |
|||
|
|||
/* Define this symbol to use the num-based field inverse implementation */ |
|||
/* #undef USE_FIELD_INV_NUM */ |
|||
|
|||
/* Define this symbol to use the gmp implementation for num */ |
|||
/* #undef USE_NUM_GMP */ |
|||
|
|||
/* Define this symbol to use no num implementation */ |
|||
#define USE_NUM_NONE 1 |
|||
|
|||
/* Define this symbol to use the 4x64 scalar implementation */ |
|||
#define USE_SCALAR_4X64 1 |
|||
|
|||
/* Define this symbol to use the 8x32 scalar implementation */ |
|||
/* #undef USE_SCALAR_8X32 */ |
|||
|
|||
/* Define this symbol to use the native scalar inverse implementation */ |
|||
#define USE_SCALAR_INV_BUILTIN 1 |
|||
|
|||
/* Define this symbol to use the num-based scalar inverse implementation */ |
|||
/* #undef USE_SCALAR_INV_NUM */ |
|||
|
|||
/* Version number of package */ |
|||
#define VERSION "0.1" |
|||
|
|||
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
|
|||
significant byte first (like Motorola and SPARC, unlike Intel). */ |
|||
#if defined AC_APPLE_UNIVERSAL_BUILD |
|||
# if defined __BIG_ENDIAN__ |
|||
# define WORDS_BIGENDIAN 1 |
|||
# endif |
|||
#else |
|||
# ifndef WORDS_BIGENDIAN |
|||
/* # undef WORDS_BIGENDIAN */ |
|||
# endif |
|||
#endif |
|||
|
|||
#endif /*LIBSECP256K1_CONFIG_H*/ |
@ -0,0 +1,9 @@ |
|||
include_HEADERS += include/secp256k1_ecdh.h |
|||
noinst_HEADERS += src/modules/ecdh/main_impl.h |
|||
noinst_HEADERS += src/modules/ecdh/tests_impl.h |
|||
if USE_BENCHMARK |
|||
noinst_PROGRAMS += bench_ecdh |
|||
bench_ecdh_SOURCES = src/bench_ecdh.c |
|||
bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) |
|||
bench_ecdh_LDFLAGS = -static |
|||
endif |
@ -0,0 +1,53 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2015 Andrew Poelstra * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_MODULE_ECDH_MAIN_ |
|||
#define _SECP256K1_MODULE_ECDH_MAIN_ |
|||
|
|||
#include "ecmult_const_impl.h" |
|||
|
|||
int secp256k1_ecdh(const secp256k1_context_t* ctx, unsigned char *result, const secp256k1_pubkey_t *point, const unsigned char *scalar) { |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
secp256k1_gej_t res; |
|||
secp256k1_ge_t pt; |
|||
secp256k1_scalar_t s; |
|||
ARG_CHECK(result != NULL); |
|||
ARG_CHECK(point != NULL); |
|||
ARG_CHECK(scalar != NULL); |
|||
(void)ctx; |
|||
|
|||
secp256k1_pubkey_load(ctx, &pt, point); |
|||
secp256k1_scalar_set_b32(&s, scalar, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&s)) { |
|||
ret = 0; |
|||
} else { |
|||
unsigned char x[32]; |
|||
unsigned char y[1]; |
|||
secp256k1_sha256_t sha; |
|||
|
|||
secp256k1_ecmult_const(&res, &pt, &s); |
|||
secp256k1_ge_set_gej(&pt, &res); |
|||
/* Compute a hash of the point in compressed form
|
|||
* Note we cannot use secp256k1_eckey_pubkey_serialize here since it does not |
|||
* expect its output to be secret and has a timing sidechannel. */ |
|||
secp256k1_fe_normalize(&pt.x); |
|||
secp256k1_fe_normalize(&pt.y); |
|||
secp256k1_fe_get_b32(x, &pt.x); |
|||
y[0] = 0x02 | secp256k1_fe_is_odd(&pt.y); |
|||
|
|||
secp256k1_sha256_initialize(&sha); |
|||
secp256k1_sha256_write(&sha, y, sizeof(y)); |
|||
secp256k1_sha256_write(&sha, x, sizeof(x)); |
|||
secp256k1_sha256_finalize(&sha, result); |
|||
ret = 1; |
|||
} |
|||
|
|||
secp256k1_scalar_clear(&s); |
|||
return ret; |
|||
} |
|||
|
|||
#endif |
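For context, a caller-side sketch of how the function above would typically be used to agree on a shared secret. The helper name, key buffers, and error handling are illustrative only; a context with signing support and <string.h> for memcmp are assumed.

static int example_shared_secret(secp256k1_context_t *ctx,
                                 const unsigned char alice_seckey[32],
                                 const unsigned char bob_seckey[32]) {
    secp256k1_pubkey_t alice_pub, bob_pub;
    unsigned char secret_a[32], secret_b[32];
    if (!secp256k1_ec_pubkey_create(ctx, &alice_pub, alice_seckey)) return 0;
    if (!secp256k1_ec_pubkey_create(ctx, &bob_pub, bob_seckey)) return 0;
    /* alice_seckey * bob_pub and bob_seckey * alice_pub are the same curve
     * point, so both sides hash the same compressed serialization. */
    if (!secp256k1_ecdh(ctx, secret_a, &bob_pub, alice_seckey)) return 0;
    if (!secp256k1_ecdh(ctx, secret_b, &alice_pub, bob_seckey)) return 0;
    return memcmp(secret_a, secret_b, 32) == 0;
}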
@ -0,0 +1,75 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2015 Andrew Poelstra * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_MODULE_ECDH_TESTS_ |
|||
#define _SECP256K1_MODULE_ECDH_TESTS_ |
|||
|
|||
void test_ecdh_generator_basepoint(void) { |
|||
unsigned char s_one[32] = { 0 }; |
|||
secp256k1_pubkey_t point[2]; |
|||
int i; |
|||
|
|||
s_one[31] = 1; |
|||
/* Check against pubkey creation when the basepoint is the generator */ |
|||
for (i = 0; i < 100; ++i) { |
|||
secp256k1_sha256_t sha; |
|||
unsigned char s_b32[32]; |
|||
unsigned char output_ecdh[32]; |
|||
unsigned char output_ser[32]; |
|||
unsigned char point_ser[33]; |
|||
int point_ser_len = sizeof(point_ser); |
|||
secp256k1_scalar_t s; |
|||
|
|||
random_scalar_order(&s); |
|||
secp256k1_scalar_get_b32(s_b32, &s); |
|||
|
|||
/* compute using ECDH function */ |
|||
CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1); |
|||
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32) == 1); |
|||
/* compute "explicitly" */ |
|||
CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1); |
|||
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], 1) == 1); |
|||
CHECK(point_ser_len == sizeof(point_ser)); |
|||
secp256k1_sha256_initialize(&sha); |
|||
secp256k1_sha256_write(&sha, point_ser, point_ser_len); |
|||
secp256k1_sha256_finalize(&sha, output_ser); |
|||
/* compare */ |
|||
CHECK(memcmp(output_ecdh, output_ser, sizeof(output_ser)) == 0); |
|||
} |
|||
} |
|||
|
|||
void test_bad_scalar(void) { |
|||
unsigned char s_zero[32] = { 0 }; |
|||
unsigned char s_overflow[32] = { |
|||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
|||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, |
|||
0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, |
|||
0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 |
|||
}; |
|||
unsigned char s_rand[32] = { 0 }; |
|||
unsigned char output[32]; |
|||
secp256k1_scalar_t rand; |
|||
secp256k1_pubkey_t point; |
|||
|
|||
/* Create random point */ |
|||
random_scalar_order(&rand); |
|||
secp256k1_scalar_get_b32(s_rand, &rand); |
|||
CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1); |
|||
|
|||
/* Try to multiply it by bad values */ |
|||
CHECK(secp256k1_ecdh(ctx, output, &point, s_zero) == 0); |
|||
CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 0); |
|||
/* ...and a good one */ |
|||
s_overflow[31] -= 1; |
|||
CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 1); |
|||
} |
|||
|
|||
void run_ecdh_tests(void) { |
|||
test_ecdh_generator_basepoint(); |
|||
test_bad_scalar(); |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,15 @@ |
|||
include_HEADERS += include/secp256k1_rangeproof.h |
|||
noinst_HEADERS += src/modules/rangeproof/main_impl.h |
|||
noinst_HEADERS += src/modules/rangeproof/pedersen.h |
|||
noinst_HEADERS += src/modules/rangeproof/pedersen_impl.h |
|||
noinst_HEADERS += src/modules/rangeproof/borromean.h |
|||
noinst_HEADERS += src/modules/rangeproof/borromean_impl.h |
|||
noinst_HEADERS += src/modules/rangeproof/rangeproof.h |
|||
noinst_HEADERS += src/modules/rangeproof/rangeproof_impl.h |
|||
noinst_HEADERS += src/modules/rangeproof/tests_impl.h |
|||
if USE_BENCHMARK |
|||
noinst_PROGRAMS += bench_rangeproof |
|||
bench_rangeproof_SOURCES = src/bench_rangeproof.c |
|||
bench_rangeproof_LDADD = libsecp256k1.la $(SECP_LIBS) |
|||
bench_rangeproof_LDFLAGS = -static |
|||
endif |
@ -0,0 +1,24 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014, 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
|
|||
#ifndef _SECP256K1_BORROMEAN_H_ |
|||
#define _SECP256K1_BORROMEAN_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
#include "ecmult.h" |
|||
#include "ecmult_gen.h" |
|||
|
|||
int secp256k1_borromean_verify(const secp256k1_ecmult_context_t* ecmult_ctx, secp256k1_scalar_t *evalues, const unsigned char *e0, const secp256k1_scalar_t *s, |
|||
const secp256k1_gej_t *pubs, const int *rsizes, int nrings, const unsigned char *m, int mlen); |
|||
|
|||
int secp256k1_borromean_sign(const secp256k1_ecmult_context_t* ecmult_ctx, const secp256k1_ecmult_gen_context_t *ecmult_gen_ctx, |
|||
unsigned char *e0, secp256k1_scalar_t *s, const secp256k1_gej_t *pubs, const secp256k1_scalar_t *k, const secp256k1_scalar_t *sec, |
|||
const int *rsizes, const int *secidx, int nrings, const unsigned char *m, int mlen); |
|||
|
|||
#endif |
@ -0,0 +1,201 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014, 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
|
|||
#ifndef _SECP256K1_BORROMEAN_IMPL_H_ |
|||
#define _SECP256K1_BORROMEAN_IMPL_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
#include "ecmult.h" |
|||
#include "ecmult_gen.h" |
|||
#include "borromean.h" |
|||
|
|||
#include <limits.h> |
|||
|
|||
#ifdef WORDS_BIGENDIAN |
|||
#define BE32(x) (x) |
|||
#else |
|||
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24)) |
|||
#endif |
|||
|
|||
SECP256K1_INLINE static void secp256k1_borromean_hash(unsigned char *hash, const unsigned char *m, int mlen, const unsigned char *e, int elen, |
|||
int ridx, int eidx) { |
|||
uint32_t ring; |
|||
uint32_t epos; |
|||
secp256k1_sha256_t sha256_en; |
|||
secp256k1_sha256_initialize(&sha256_en); |
|||
ring = BE32((uint32_t)ridx); |
|||
epos = BE32((uint32_t)eidx); |
|||
secp256k1_sha256_write(&sha256_en, e, elen); |
|||
secp256k1_sha256_write(&sha256_en, m, mlen); |
|||
secp256k1_sha256_write(&sha256_en, (unsigned char*)&ring, 4); |
|||
secp256k1_sha256_write(&sha256_en, (unsigned char*)&epos, 4); |
|||
secp256k1_sha256_finalize(&sha256_en, hash); |
|||
} |
|||
|
|||
/** "Borromean" ring signature.
|
|||
* Verifies nrings concurrent ring signatures all sharing a challenge value. |
|||
* Signature is one s value per pubkey and a hash. |
|||
* Verification equation: |
|||
* | m = H(P_{0..}||message) (Message must contain pubkeys or a pubkey commitment) |
|||
* | For each ring i: |
|||
* | | en = to_scalar(H(e0||m||i||0)) |
|||
* | | For each pubkey j: |
|||
* | | | r = s_i_j G + en * P_i_j |
|||
* | | | e = H(r||m||i||j) |
|||
* | | | en = to_scalar(e) |
|||
* | | r_i = r |
|||
* | return e_0 == H(r_{0..i}||m) |
|||
*/ |
|||
int secp256k1_borromean_verify(const secp256k1_ecmult_context_t* ecmult_ctx, secp256k1_scalar_t *evalues, const unsigned char *e0, |
|||
const secp256k1_scalar_t *s, const secp256k1_gej_t *pubs, const int *rsizes, int nrings, const unsigned char *m, int mlen) { |
|||
secp256k1_gej_t rgej; |
|||
secp256k1_ge_t rge; |
|||
secp256k1_scalar_t ens; |
|||
secp256k1_sha256_t sha256_e0; |
|||
unsigned char tmp[33]; |
|||
int i; |
|||
int j; |
|||
int count; |
|||
int size; |
|||
int overflow; |
|||
VERIFY_CHECK(ecmult_ctx != NULL); |
|||
VERIFY_CHECK(e0 != NULL); |
|||
VERIFY_CHECK(s != NULL); |
|||
VERIFY_CHECK(pubs != NULL); |
|||
VERIFY_CHECK(rsizes != NULL); |
|||
VERIFY_CHECK(nrings > 0); |
|||
VERIFY_CHECK(m != NULL); |
|||
count = 0; |
|||
secp256k1_sha256_initialize(&sha256_e0); |
|||
for (i = 0; i < nrings; i++) { |
|||
VERIFY_CHECK(INT_MAX - count > rsizes[i]); |
|||
secp256k1_borromean_hash(tmp, m, mlen, e0, 32, i, 0); |
|||
secp256k1_scalar_set_b32(&ens, tmp, &overflow); |
|||
for (j = 0; j < rsizes[i]; j++) { |
|||
if (overflow || secp256k1_scalar_is_zero(&s[count]) || secp256k1_scalar_is_zero(&ens) || secp256k1_gej_is_infinity(&pubs[count])) { |
|||
return 0; |
|||
} |
|||
if (evalues) { |
|||
/* If requested, save the challenges for proof rewind. */ |
|||
evalues[count] = ens; |
|||
} |
|||
secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count], &ens, &s[count]); |
|||
if (secp256k1_gej_is_infinity(&rgej)) { |
|||
return 0; |
|||
} |
|||
/* OPT: loop can be hoisted and split to use batch inversion across all the rings; this would make it much faster. */ |
|||
secp256k1_ge_set_gej_var(&rge, &rgej); |
|||
secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1); |
|||
if (j != rsizes[i] - 1) { |
|||
secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j + 1); |
|||
secp256k1_scalar_set_b32(&ens, tmp, &overflow); |
|||
} else { |
|||
secp256k1_sha256_write(&sha256_e0, tmp, size); |
|||
} |
|||
count++; |
|||
} |
|||
} |
|||
secp256k1_sha256_write(&sha256_e0, m, mlen); |
|||
secp256k1_sha256_finalize(&sha256_e0, tmp); |
|||
return memcmp(e0, tmp, 32) == 0; |
|||
} |
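In the notation of the comment above (with each hash output reduced to a scalar where it multiplies a pubkey), the recurrence the verifier runs for ring i with members j = 0, ..., last is

$$e_{i,0} = H(e_0 \,\|\, m \,\|\, i \,\|\, 0), \qquad R_{i,j} = s_{i,j}\,G + e_{i,j}\,P_{i,j}, \qquad e_{i,j+1} = H(R_{i,j} \,\|\, m \,\|\, i \,\|\, j{+}1),$$

and the signature is accepted iff $e_0 = H(R_{0,\mathrm{last}} \,\|\, \cdots \,\|\, R_{n-1,\mathrm{last}} \,\|\, m)$, which is exactly what the sha256_e0 accumulator computes.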
|||
|
|||
int secp256k1_borromean_sign(const secp256k1_ecmult_context_t* ecmult_ctx, const secp256k1_ecmult_gen_context_t *ecmult_gen_ctx, |
|||
unsigned char *e0, secp256k1_scalar_t *s, const secp256k1_gej_t *pubs, const secp256k1_scalar_t *k, const secp256k1_scalar_t *sec, |
|||
const int *rsizes, const int *secidx, int nrings, const unsigned char *m, int mlen) { |
|||
secp256k1_gej_t rgej; |
|||
secp256k1_ge_t rge; |
|||
secp256k1_scalar_t ens; |
|||
secp256k1_sha256_t sha256_e0; |
|||
unsigned char tmp[33]; |
|||
int i; |
|||
int j; |
|||
int count; |
|||
int size; |
|||
int overflow; |
|||
VERIFY_CHECK(ecmult_ctx != NULL); |
|||
VERIFY_CHECK(ecmult_gen_ctx != NULL); |
|||
VERIFY_CHECK(e0 != NULL); |
|||
VERIFY_CHECK(s != NULL); |
|||
VERIFY_CHECK(pubs != NULL); |
|||
VERIFY_CHECK(k != NULL); |
|||
VERIFY_CHECK(sec != NULL); |
|||
VERIFY_CHECK(rsizes != NULL); |
|||
VERIFY_CHECK(secidx != NULL); |
|||
VERIFY_CHECK(nrings > 0); |
|||
VERIFY_CHECK(m != NULL); |
|||
secp256k1_sha256_initialize(&sha256_e0); |
|||
count = 0; |
|||
for (i = 0; i < nrings; i++) { |
|||
VERIFY_CHECK(INT_MAX - count > rsizes[i]); |
|||
secp256k1_ecmult_gen(ecmult_gen_ctx, &rgej, &k[i]); |
|||
secp256k1_ge_set_gej(&rge, &rgej); |
|||
if (secp256k1_gej_is_infinity(&rgej)) { |
|||
return 0; |
|||
} |
|||
secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1); |
|||
for (j = secidx[i] + 1; j < rsizes[i]; j++) { |
|||
secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j); |
|||
secp256k1_scalar_set_b32(&ens, tmp, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&ens)) { |
|||
return 0; |
|||
} |
|||
/** The signing algorithm as a whole is not memory uniform so there is likely a cache sidechannel that
|
|||
* leaks which members are non-forgeries. That the forgeries themselves are variable time may leave |
|||
* an additional privacy impacting timing side-channel, but not a key loss one. |
|||
*/ |
|||
secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count + j], &ens, &s[count + j]); |
|||
if (secp256k1_gej_is_infinity(&rgej)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej_var(&rge, &rgej); |
|||
secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1); |
|||
} |
|||
secp256k1_sha256_write(&sha256_e0, tmp, size); |
|||
count += rsizes[i]; |
|||
} |
|||
secp256k1_sha256_write(&sha256_e0, m, mlen); |
|||
secp256k1_sha256_finalize(&sha256_e0, e0); |
|||
count = 0; |
|||
for (i = 0; i < nrings; i++) { |
|||
VERIFY_CHECK(INT_MAX - count > rsizes[i]); |
|||
secp256k1_borromean_hash(tmp, m, mlen, e0, 32, i, 0); |
|||
secp256k1_scalar_set_b32(&ens, tmp, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&ens)) { |
|||
return 0; |
|||
} |
|||
for (j = 0; j < secidx[i]; j++) { |
|||
secp256k1_ecmult(ecmult_ctx, &rgej, &pubs[count + j], &ens, &s[count + j]); |
|||
if (secp256k1_gej_is_infinity(&rgej)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej_var(&rge, &rgej); |
|||
secp256k1_eckey_pubkey_serialize(&rge, tmp, &size, 1); |
|||
secp256k1_borromean_hash(tmp, m, mlen, tmp, 33, i, j + 1); |
|||
secp256k1_scalar_set_b32(&ens, tmp, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&ens)) { |
|||
return 0; |
|||
} |
|||
} |
|||
secp256k1_scalar_mul(&s[count + j], &ens, &sec[i]); |
|||
secp256k1_scalar_negate(&s[count + j], &s[count + j]); |
|||
secp256k1_scalar_add(&s[count + j], &s[count + j], &k[i]); |
|||
if (secp256k1_scalar_is_zero(&s[count + j])) { |
|||
return 0; |
|||
} |
|||
count += rsizes[i]; |
|||
} |
|||
secp256k1_scalar_clear(&ens); |
|||
secp256k1_ge_clear(&rge); |
|||
secp256k1_gej_clear(&rgej); |
|||
memset(tmp, 0, 33); |
|||
return 1; |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,176 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014-2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef SECP256K1_MODULE_RANGEPROOF_MAIN |
|||
#define SECP256K1_MODULE_RANGEPROOF_MAIN |
|||
|
|||
#include "modules/rangeproof/pedersen_impl.h" |
|||
#include "modules/rangeproof/borromean_impl.h" |
|||
#include "modules/rangeproof/rangeproof_impl.h" |
|||
|
|||
void secp256k1_pedersen_context_initialize(secp256k1_context_t* ctx) { |
|||
secp256k1_pedersen_context_build(&ctx->pedersen_ctx, &ctx->error_callback); |
|||
} |
|||
|
|||
/* Generates a pedersen commitment: *commit = blind * G + value * G2. The commitment is 33 bytes, the blinding factor is 32 bytes.*/ |
|||
int secp256k1_pedersen_commit(const secp256k1_context_t* ctx, unsigned char *commit, unsigned char *blind, uint64_t value) { |
|||
secp256k1_gej_t rj; |
|||
secp256k1_ge_t r; |
|||
secp256k1_scalar_t sec; |
|||
int sz; |
|||
int overflow; |
|||
int ret = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(secp256k1_pedersen_context_is_built(&ctx->pedersen_ctx)); |
|||
ARG_CHECK(commit != NULL); |
|||
ARG_CHECK(blind != NULL); |
|||
secp256k1_scalar_set_b32(&sec, blind, &overflow); |
|||
if (!overflow) { |
|||
secp256k1_pedersen_ecmult(&ctx->ecmult_gen_ctx, &ctx->pedersen_ctx, &rj, &sec, value); |
|||
if (!secp256k1_gej_is_infinity(&rj)) { |
|||
secp256k1_ge_set_gej(&r, &rj); |
|||
sz = 33; |
|||
ret = secp256k1_eckey_pubkey_serialize(&r, commit, &sz, 1); |
|||
} |
|||
secp256k1_gej_clear(&rj); |
|||
secp256k1_ge_clear(&r); |
|||
} |
|||
secp256k1_scalar_clear(&sec); |
|||
return ret; |
|||
} |
|||
|
|||
/** Takes a list of n pointers to 32 byte blinding values, the first npositive of which are treated with positive sign and the rest
|
|||
* negative, then calculates an additional blinding value that adds to zero. |
|||
*/ |
|||
int secp256k1_pedersen_blind_sum(const secp256k1_context_t* ctx, unsigned char *blind_out, const unsigned char * const *blinds, int n, int npositive) { |
|||
secp256k1_scalar_t acc; |
|||
secp256k1_scalar_t x; |
|||
int i; |
|||
int overflow; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(blind_out != NULL); |
|||
ARG_CHECK(blinds != NULL); |
|||
secp256k1_scalar_set_int(&acc, 0); |
|||
for (i = 0; i < n; i++) { |
|||
secp256k1_scalar_set_b32(&x, blinds[i], &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
if (i >= npositive) { |
|||
secp256k1_scalar_negate(&x, &x); |
|||
} |
|||
secp256k1_scalar_add(&acc, &acc, &x); |
|||
} |
|||
secp256k1_scalar_get_b32(blind_out, &acc); |
|||
secp256k1_scalar_clear(&acc); |
|||
secp256k1_scalar_clear(&x); |
|||
return 1; |
|||
} |
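A typical use of blind_sum is to pick all but one blinding factor at random and let this function produce the last one so that a set of commitments tallies. A hedged sketch, splitting one committed amount into two outputs; the helper name, parameter passing, and error handling are illustrative and not part of this commit, and value1 <= value is assumed.

static int example_split(const secp256k1_context_t *ctx,
                         unsigned char in_blind[32], uint64_t value,
                         unsigned char out1_blind[32], uint64_t value1,
                         unsigned char commit_in[33],
                         unsigned char commit_out1[33],
                         unsigned char commit_out2[33]) {
    unsigned char out2_blind[32];
    const unsigned char *blinds[2];
    blinds[0] = in_blind;   /* counted with positive sign */
    blinds[1] = out1_blind; /* counted with negative sign */
    /* out2_blind = in_blind - out1_blind, so the blinding factors cancel. */
    if (!secp256k1_pedersen_blind_sum(ctx, out2_blind, blinds, 2, 1)) return 0;
    if (!secp256k1_pedersen_commit(ctx, commit_in, in_blind, value)) return 0;
    if (!secp256k1_pedersen_commit(ctx, commit_out1, out1_blind, value1)) return 0;
    /* commit_out1 + commit_out2 now equals commit_in as curve points. */
    return secp256k1_pedersen_commit(ctx, commit_out2, out2_blind, value - value1);
}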
|||
|
|||
/* Takes two lists of 33-byte commitments, sums the first set, subtracts the second, and verifies that they sum to excess. */ |
|||
int secp256k1_pedersen_verify_tally(const secp256k1_context_t* ctx, const unsigned char * const *commits, int pcnt, |
|||
const unsigned char * const *ncommits, int ncnt, int64_t excess) { |
|||
secp256k1_gej_t accj; |
|||
secp256k1_ge_t add; |
|||
int i; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(!pcnt || (commits != NULL)); |
|||
ARG_CHECK(!ncnt || (ncommits != NULL)); |
|||
ARG_CHECK(secp256k1_pedersen_context_is_built(&ctx->pedersen_ctx)); |
|||
secp256k1_gej_set_infinity(&accj); |
|||
if (excess) { |
|||
uint64_t ex; |
|||
int neg; |
|||
/* Take the absolute value, and negate the result if the input was negative. */ |
|||
neg = secp256k1_sign_and_abs64(&ex, excess); |
|||
secp256k1_pedersen_ecmult_small(&ctx->pedersen_ctx, &accj, ex); |
|||
if (neg) { |
|||
secp256k1_gej_neg(&accj, &accj); |
|||
} |
|||
} |
|||
for (i = 0; i < ncnt; i++) { |
|||
if (!secp256k1_eckey_pubkey_parse(&add, ncommits[i], 33)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); |
|||
} |
|||
secp256k1_gej_neg(&accj, &accj); |
|||
for (i = 0; i < pcnt; i++) { |
|||
if (!secp256k1_eckey_pubkey_parse(&add, commits[i], 33)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); |
|||
} |
|||
return secp256k1_gej_is_infinity(&accj); |
|||
} |
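Why the infinity check implies balance: each commitment is $C = b\,G + v\,G2$, and the accumulator computed above is

$$\sum_i C_i^{+} - \sum_j C_j^{-} - \mathrm{excess}\cdot G2 = \Big(\sum b^{+} - \sum b^{-}\Big)G + \Big(\sum v^{+} - \sum v^{-} - \mathrm{excess}\Big)G2 .$$

It is the point at infinity precisely when both scalar sums are zero (assuming no discrete-log relation between G and G2 is known), i.e. when the blinding factors cancel and the committed values balance to excess.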
|||
|
|||
void secp256k1_rangeproof_context_initialize(secp256k1_context_t* ctx) { |
|||
secp256k1_rangeproof_context_build(&ctx->rangeproof_ctx, &ctx->error_callback); |
|||
} |
|||
|
|||
int secp256k1_rangeproof_info(const secp256k1_context_t* ctx, int *exp, int *mantissa, |
|||
uint64_t *min_value, uint64_t *max_value, const unsigned char *proof, int plen) { |
|||
int offset; |
|||
uint64_t scale; |
|||
ARG_CHECK(exp != NULL); |
|||
ARG_CHECK(mantissa != NULL); |
|||
ARG_CHECK(min_value != NULL); |
|||
ARG_CHECK(max_value != NULL); |
|||
offset = 0; |
|||
scale = 1; |
|||
(void)ctx; |
|||
return secp256k1_rangeproof_getheader_impl(&offset, exp, mantissa, &scale, min_value, max_value, proof, plen); |
|||
} |
|||
|
|||
int secp256k1_rangeproof_rewind(const secp256k1_context_t* ctx, |
|||
unsigned char *blind_out, uint64_t *value_out, unsigned char *message_out, int *outlen, const unsigned char *nonce, |
|||
uint64_t *min_value, uint64_t *max_value, |
|||
const unsigned char *commit, const unsigned char *proof, int plen) { |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(commit != NULL); |
|||
ARG_CHECK(proof != NULL); |
|||
ARG_CHECK(min_value != NULL); |
|||
ARG_CHECK(max_value != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(secp256k1_pedersen_context_is_built(&ctx->pedersen_ctx)); |
|||
ARG_CHECK(secp256k1_rangeproof_context_is_built(&ctx->rangeproof_ctx)); |
|||
return secp256k1_rangeproof_verify_impl(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, &ctx->pedersen_ctx, &ctx->rangeproof_ctx, |
|||
blind_out, value_out, message_out, outlen, nonce, min_value, max_value, commit, proof, plen); |
|||
} |
|||
|
|||
int secp256k1_rangeproof_verify(const secp256k1_context_t* ctx, uint64_t *min_value, uint64_t *max_value, |
|||
const unsigned char *commit, const unsigned char *proof, int plen) { |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(commit != NULL); |
|||
ARG_CHECK(proof != NULL); |
|||
ARG_CHECK(min_value != NULL); |
|||
ARG_CHECK(max_value != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(secp256k1_pedersen_context_is_built(&ctx->pedersen_ctx)); |
|||
ARG_CHECK(secp256k1_rangeproof_context_is_built(&ctx->rangeproof_ctx)); |
|||
return secp256k1_rangeproof_verify_impl(&ctx->ecmult_ctx, NULL, &ctx->pedersen_ctx, &ctx->rangeproof_ctx, |
|||
NULL, NULL, NULL, NULL, NULL, min_value, max_value, commit, proof, plen); |
|||
} |
|||
|
|||
int secp256k1_rangeproof_sign(const secp256k1_context_t* ctx, unsigned char *proof, int *plen, uint64_t min_value, |
|||
const unsigned char *commit, const unsigned char *blind, const unsigned char *nonce, int exp, int min_bits, uint64_t value){ |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(proof != NULL); |
|||
ARG_CHECK(plen != NULL); |
|||
ARG_CHECK(commit != NULL); |
|||
ARG_CHECK(blind != NULL); |
|||
ARG_CHECK(nonce != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(secp256k1_pedersen_context_is_built(&ctx->pedersen_ctx)); |
|||
ARG_CHECK(secp256k1_rangeproof_context_is_built(&ctx->rangeproof_ctx)); |
|||
return secp256k1_rangeproof_sign_impl(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, &ctx->pedersen_ctx, &ctx->rangeproof_ctx, |
|||
proof, plen, min_value, commit, blind, nonce, exp, min_bits, value); |
|||
} |
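/* Illustrative end-to-end sketch of the three calls above (hedged; it
 * follows the pattern of test_rangeproof rather than prescribing an API
 * contract, and every name below is hypothetical):
 *
 *     unsigned char commit[33], blind[32], blind_out[32];
 *     unsigned char proof[5134], msg[4096];
 *     uint64_t value = 12345, value_out, minv, maxv;
 *     int plen = 5134, mlen = 4096;
 *     // blind must be a uniformly random 32-byte scalar
 *     secp256k1_pedersen_commit(ctx, commit, blind, value);
 *     // prove a range for value, reusing the commitment as the rewind nonce as the tests do
 *     secp256k1_rangeproof_sign(ctx, proof, &plen, 0, commit, blind, commit, 0, 0, value);
 *     secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit, proof, plen);
 *     // with the nonce, the proof can be rewound to recover value and blind
 *     secp256k1_rangeproof_rewind(ctx, blind_out, &value_out, msg, &mlen,
 *                                 commit, &minv, &maxv, commit, proof, plen);
 */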
|||
|
|||
#endif |
@ -0,0 +1,34 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2014, 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_PEDERSEN_H_ |
|||
#define _SECP256K1_PEDERSEN_H_ |
|||
|
|||
#include "group.h" |
|||
#include "scalar.h" |
|||
|
|||
#include <stdint.h> |
|||
|
|||
typedef struct { |
|||
secp256k1_ge_storage_t (*prec)[16][16]; /* prec[j][i] = 16^j * i * G2 + U_j */ |
|||
} secp256k1_pedersen_context_t; |
|||
|
|||
static void secp256k1_pedersen_context_init(secp256k1_pedersen_context_t* ctx); |
|||
static void secp256k1_pedersen_context_build(secp256k1_pedersen_context_t* ctx, const callback_t* cb); |
|||
static void secp256k1_pedersen_context_clone(secp256k1_pedersen_context_t *dst, |
|||
const secp256k1_pedersen_context_t* src, const callback_t* cb); |
|||
static void secp256k1_pedersen_context_clear(secp256k1_pedersen_context_t* ctx); |
|||
|
|||
static int secp256k1_pedersen_context_is_built(const secp256k1_pedersen_context_t* ctx); |
|||
|
|||
/** Multiply a small number by the second generator: r = gn*G2 */ |
|||
static void secp256k1_pedersen_ecmult_small(const secp256k1_pedersen_context_t *ctx, secp256k1_gej_t *r, uint64_t gn); |
|||
|
|||
/* sec * G + value * G2. */ |
|||
static void secp256k1_pedersen_ecmult(const secp256k1_ecmult_gen_context_t *ecmult_gen_ctx, |
|||
const secp256k1_pedersen_context_t *pedersen_ctx, secp256k1_gej_t *rj, const secp256k1_scalar_t *sec, uint64_t value); |
|||
|
|||
#endif |
@ -0,0 +1,139 @@ |
|||
/*********************************************************************** |
|||
* Copyright (c) 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php. * |
|||
***********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_PEDERSEN_IMPL_H_ |
|||
#define _SECP256K1_PEDERSEN_IMPL_H_ |
|||
|
|||
/** Alternative generator for secp256k1. |
|||
* This is the sha256 of 'g' after DER encoding (without compression), |
|||
 * used as an x coordinate that happens to lie on the curve. |
|||
* sage: G2 = EllipticCurve ([F (0), F (7)]).lift_x(int(hashlib.sha256('0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'.decode('hex')).hexdigest(),16)) |
|||
* sage: '%x %x'%G2.xy() |
|||
*/ |
|||
static const secp256k1_ge_t secp256k1_ge_const_g2 = SECP256K1_GE_CONST( |
|||
0x50929b74UL, 0xc1a04954UL, 0xb78b4b60UL, 0x35e97a5eUL, |
|||
0x078a5a0fUL, 0x28ec96d5UL, 0x47bfee9aUL, 0xce803ac0UL, |
|||
0x31d3c686UL, 0x3973926eUL, 0x049e637cUL, 0xb1b5f40aUL, |
|||
0x36dac28aUL, 0xf1766968UL, 0xc30c2313UL, 0xf3a38904UL |
|||
); |
|||
|
|||
static void secp256k1_pedersen_context_init(secp256k1_pedersen_context_t *ctx) { |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
static void secp256k1_pedersen_context_build(secp256k1_pedersen_context_t *ctx, const callback_t *cb) { |
|||
secp256k1_ge_t prec[256]; |
|||
secp256k1_gej_t gj; |
|||
secp256k1_gej_t nums_gej; |
|||
int i, j; |
|||
|
|||
if (ctx->prec != NULL) { |
|||
return; |
|||
} |
|||
|
|||
ctx->prec = (secp256k1_ge_storage_t (*)[16][16])checked_malloc(cb, sizeof(*ctx->prec)); |
|||
|
|||
/* get the generator */ |
|||
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g2); |
|||
|
|||
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ |
|||
{ |
|||
static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; |
|||
secp256k1_fe_t nums_x; |
|||
secp256k1_ge_t nums_ge; |
|||
VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32)); |
|||
VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0)); |
|||
secp256k1_gej_set_ge(&nums_gej, &nums_ge); |
|||
/* Add G2 to make the bits in x uniformly distributed. */ |
|||
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g2, NULL); |
|||
} |
|||
|
|||
/* compute prec. */ |
|||
{ |
|||
secp256k1_gej_t precj[256]; /* Jacobian versions of prec. */ |
|||
secp256k1_gej_t gbase; |
|||
secp256k1_gej_t numsbase; |
|||
gbase = gj; /* 16^j * G */ |
|||
numsbase = nums_gej; /* 2^j * nums. */ |
|||
for (j = 0; j < 16; j++) { |
|||
/* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */ |
|||
precj[j*16] = numsbase; |
|||
for (i = 1; i < 16; i++) { |
|||
secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); |
|||
} |
|||
/* Multiply gbase by 16. */ |
|||
for (i = 0; i < 4; i++) { |
|||
secp256k1_gej_double_var(&gbase, &gbase, NULL); |
|||
} |
|||
/* Multiply numsbase by 2. */ |
|||
secp256k1_gej_double_var(&numsbase, &numsbase, NULL); |
|||
if (j == 14) { |
|||
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ |
|||
secp256k1_gej_neg(&numsbase, &numsbase); |
|||
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); |
|||
} |
|||
} |
|||
secp256k1_ge_set_all_gej_var(256, prec, precj, cb); |
|||
} |
|||
for (j = 0; j < 16; j++) { |
|||
for (i = 0; i < 16; i++) { |
|||
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); |
|||
} |
|||
} |
|||
} |
|||
|
|||
static int secp256k1_pedersen_context_is_built(const secp256k1_pedersen_context_t* ctx) { |
|||
return ctx->prec != NULL; |
|||
} |
|||
|
|||
static void secp256k1_pedersen_context_clone(secp256k1_pedersen_context_t *dst, |
|||
const secp256k1_pedersen_context_t *src, const callback_t *cb) { |
|||
if (src->prec == NULL) { |
|||
dst->prec = NULL; |
|||
} else { |
|||
dst->prec = (secp256k1_ge_storage_t (*)[16][16])checked_malloc(cb, sizeof(*dst->prec)); |
|||
memcpy(dst->prec, src->prec, sizeof(*dst->prec)); |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_pedersen_context_clear(secp256k1_pedersen_context_t *ctx) { |
|||
free(ctx->prec); |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
/* Version of secp256k1_ecmult_gen using the second generator and working only on numbers in the range [0 .. 2^64). */ |
|||
static void secp256k1_pedersen_ecmult_small(const secp256k1_pedersen_context_t *ctx, secp256k1_gej_t *r, uint64_t gn) { |
|||
secp256k1_ge_t add; |
|||
secp256k1_ge_storage_t adds; |
|||
int bits; |
|||
int i, j; |
|||
memset(&adds, 0, sizeof(adds)); |
|||
secp256k1_gej_set_infinity(r); |
|||
add.infinity = 0; |
|||
for (j = 0; j < 16; j++) { |
|||
bits = (gn >> (j * 4)) & 15; |
|||
for (i = 0; i < 16; i++) { |
|||
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); |
|||
} |
|||
secp256k1_ge_from_storage(&add, &adds); |
|||
secp256k1_gej_add_ge(r, r, &add); |
|||
} |
|||
bits = 0; |
|||
secp256k1_ge_clear(&add); |
|||
} |
|||
|
|||
/* sec * G + value * G2. */ |
|||
SECP256K1_INLINE static void secp256k1_pedersen_ecmult(const secp256k1_ecmult_gen_context_t *ecmult_gen_ctx, |
|||
const secp256k1_pedersen_context_t *pedersen_ctx, secp256k1_gej_t *rj, const secp256k1_scalar_t *sec, uint64_t value) { |
|||
secp256k1_gej_t vj; |
|||
secp256k1_ecmult_gen(ecmult_gen_ctx, rj, sec); |
|||
secp256k1_pedersen_ecmult_small(pedersen_ctx, &vj, value); |
|||
/* FIXME: constant time. */ |
|||
secp256k1_gej_add_var(rj, rj, &vj, NULL); |
|||
secp256k1_gej_clear(&vj); |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,31 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_RANGEPROOF_H_ |
|||
#define _SECP256K1_RANGEPROOF_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
|
|||
typedef struct { |
|||
secp256k1_ge_storage_t (*prec)[1005]; |
|||
} secp256k1_rangeproof_context_t; |
|||
|
|||
|
|||
static void secp256k1_rangeproof_context_init(secp256k1_rangeproof_context_t* ctx); |
|||
static void secp256k1_rangeproof_context_build(secp256k1_rangeproof_context_t* ctx, const callback_t* cb); |
|||
static void secp256k1_rangeproof_context_clone(secp256k1_rangeproof_context_t *dst, |
|||
const secp256k1_rangeproof_context_t* src, const callback_t* cb); |
|||
static void secp256k1_rangeproof_context_clear(secp256k1_rangeproof_context_t* ctx); |
|||
static int secp256k1_rangeproof_context_is_built(const secp256k1_rangeproof_context_t* ctx); |
|||
|
|||
static int secp256k1_rangeproof_verify_impl(const secp256k1_ecmult_context_t* ecmult_ctx, |
|||
const secp256k1_ecmult_gen_context_t* ecmult_gen_ctx, |
|||
const secp256k1_pedersen_context_t* pedersen_ctx, const secp256k1_rangeproof_context_t* rangeproof_ctx, |
|||
unsigned char *blindout, uint64_t *value_out, unsigned char *message_out, int *outlen, const unsigned char *nonce, |
|||
uint64_t *min_value, uint64_t *max_value, const unsigned char *commit, const unsigned char *proof, int plen); |
|||
|
|||
#endif |
@ -0,0 +1,735 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_RANGEPROOF_IMPL_H_ |
|||
#define _SECP256K1_RANGEPROOF_IMPL_H_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
#include "rangeproof.h" |
|||
#include "hash_impl.h" |
|||
|
|||
#include "modules/rangeproof/pedersen.h" |
|||
#include "modules/rangeproof/borromean.h" |
|||
|
|||
static const int secp256k1_rangeproof_offsets[20] = { |
|||
0, 96, 189, 276, 360, 438, 510, 579, 642, |
|||
699, 753, 801, 843, 882, 915, 942, 966, 984, |
|||
996, 1005, |
|||
}; |
|||
|
|||
static void secp256k1_rangeproof_context_init(secp256k1_rangeproof_context_t *ctx) { |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
static void secp256k1_rangeproof_context_build(secp256k1_rangeproof_context_t *ctx, const callback_t* cb) { |
|||
secp256k1_ge_t *prec; |
|||
secp256k1_gej_t *precj; |
|||
secp256k1_gej_t gj; |
|||
secp256k1_gej_t one; |
|||
int i, pos; |
|||
|
|||
if (ctx->prec != NULL) { |
|||
return; |
|||
} |
|||
|
|||
precj = (secp256k1_gej_t (*))checked_malloc(cb, sizeof(*precj) * 1005); |
|||
if (precj == NULL) { |
|||
return; |
|||
} |
|||
prec = (secp256k1_ge_t (*))checked_malloc(cb, sizeof(*prec) * 1005); |
|||
if (prec == NULL) { |
|||
free(precj); |
|||
return; |
|||
} |
|||
|
|||
/* get the generator */ |
|||
secp256k1_gej_set_ge(&one, &secp256k1_ge_const_g2); |
|||
secp256k1_gej_neg(&one, &one); |
|||
|
|||
/* compute prec. */ |
|||
pos = 0; |
|||
for (i = 0; i < 19; i++) { |
|||
int pmax; |
|||
pmax = secp256k1_rangeproof_offsets[i + 1]; |
|||
gj = one; |
|||
while (pos < pmax) { |
|||
precj[pos] = gj; |
|||
pos++; |
|||
secp256k1_gej_double_var(&precj[pos], &gj, NULL); |
|||
pos++; |
|||
secp256k1_gej_add_var(&precj[pos], &precj[pos - 1], &gj, NULL); |
|||
pos++; |
|||
if (pos < pmax - 1) { |
|||
secp256k1_gej_double_var(&gj, &precj[pos - 2], NULL); |
|||
} |
|||
} |
|||
if (i < 18) { |
|||
secp256k1_gej_double_var(&gj, &one, NULL); |
|||
one = gj; |
|||
secp256k1_gej_double_var(&gj, &gj, NULL); |
|||
secp256k1_gej_double_var(&gj, &gj, NULL); |
|||
secp256k1_gej_add_var(&one, &one, &gj, NULL); |
|||
} |
|||
} |
|||
VERIFY_CHECK(pos == 1005); |
|||
secp256k1_ge_set_all_gej_var(1005, prec, precj, cb); |
|||
|
|||
free(precj); |
|||
|
|||
ctx->prec = (secp256k1_ge_storage_t (*)[1005])checked_malloc(cb, sizeof(*ctx->prec)); |
|||
if (ctx->prec == NULL) { |
|||
free(prec); |
|||
return; |
|||
} |
|||
|
|||
for (i = 0; i < 1005; i++) { |
|||
secp256k1_ge_to_storage(&(*ctx->prec)[i], &prec[i]); |
|||
} |
|||
free(prec); |
|||
} |
|||
|
|||
|
|||
static int secp256k1_rangeproof_context_is_built(const secp256k1_rangeproof_context_t* ctx) { |
|||
return ctx->prec != NULL; |
|||
} |
|||
|
|||
static void secp256k1_rangeproof_context_clone(secp256k1_rangeproof_context_t *dst, |
|||
const secp256k1_rangeproof_context_t *src, const callback_t* cb) { |
|||
if (src->prec == NULL) { |
|||
dst->prec = NULL; |
|||
} else { |
|||
dst->prec = (secp256k1_ge_storage_t (*)[1005])checked_malloc(cb, sizeof(*dst->prec)); |
|||
memcpy(dst->prec, src->prec, sizeof(*dst->prec)); |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_rangeproof_context_clear(secp256k1_rangeproof_context_t *ctx) { |
|||
free(ctx->prec); |
|||
ctx->prec = NULL; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_rangeproof_pub_expand(const secp256k1_rangeproof_context_t *ctx, secp256k1_gej_t *pubs, |
|||
int exp, int *rsizes, int rings) { |
|||
secp256k1_ge_t ge; |
|||
secp256k1_ge_storage_t *basis; |
|||
int i; |
|||
int j; |
|||
int npub; |
|||
VERIFY_CHECK(exp < 19); |
|||
if (exp < 0) { |
|||
exp = 0; |
|||
} |
|||
basis = &(*ctx->prec)[secp256k1_rangeproof_offsets[exp]]; |
|||
npub = 0; |
|||
for (i = 0; i < rings; i++) { |
|||
for (j = 1; j < rsizes[i]; j++) { |
|||
secp256k1_ge_from_storage(&ge, &basis[i * 3 + j - 1]); |
|||
secp256k1_gej_add_ge_var(&pubs[npub + j], &pubs[npub], &ge, NULL); |
|||
} |
|||
npub += rsizes[i]; |
|||
} |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_rangeproof_genrand(secp256k1_scalar_t *sec, secp256k1_scalar_t *s, unsigned char *message, |
|||
int *rsizes, int rings, const unsigned char *nonce, const unsigned char *commit, const unsigned char *proof, int len) { |
|||
unsigned char tmp[32]; |
|||
unsigned char rngseed[32 + 33 + 10]; |
|||
secp256k1_rfc6979_hmac_sha256_t rng; |
|||
secp256k1_scalar_t acc; |
|||
int overflow; |
|||
int ret; |
|||
int i; |
|||
int j; |
|||
int b; |
|||
int npub; |
|||
VERIFY_CHECK(len <= 10); |
|||
memcpy(rngseed, nonce, 32); |
|||
memcpy(rngseed + 32, commit, 33); |
|||
memcpy(rngseed + 65, proof, len); |
|||
secp256k1_rfc6979_hmac_sha256_initialize(&rng, rngseed, 32 + 33 + len); |
|||
secp256k1_scalar_clear(&acc); |
|||
npub = 0; |
|||
ret = 1; |
|||
for (i = 0; i < rings; i++) { |
|||
if (i < rings - 1) { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32); |
|||
do { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32); |
|||
secp256k1_scalar_set_b32(&sec[i], tmp, &overflow); |
|||
} while (overflow || secp256k1_scalar_is_zero(&sec[i])); |
|||
secp256k1_scalar_add(&acc, &acc, &sec[i]); |
|||
} else { |
|||
secp256k1_scalar_negate(&acc, &acc); |
|||
sec[i] = acc; |
|||
} |
|||
for (j = 0; j < rsizes[i]; j++) { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32); |
|||
if (message) { |
|||
for (b = 0; b < 32; b++) { |
|||
tmp[b] ^= message[(i * 4 + j) * 32 + b]; |
|||
message[(i * 4 + j) * 32 + b] = tmp[b]; |
|||
} |
|||
} |
|||
secp256k1_scalar_set_b32(&s[npub], tmp, &overflow); |
|||
ret &= !(overflow || secp256k1_scalar_is_zero(&s[npub])); |
|||
npub++; |
|||
} |
|||
} |
|||
secp256k1_rfc6979_hmac_sha256_finalize(&rng); |
|||
secp256k1_scalar_clear(&acc); |
|||
memset(tmp, 0, 32); |
|||
return ret; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_range_proveparams(uint64_t *v, int *rings, int *rsizes, int *npub, int *secidx, uint64_t *min_value, |
|||
int *mantissa, uint64_t *scale, int *exp, int *min_bits, uint64_t value) { |
|||
int i; |
|||
*rings = 1; |
|||
rsizes[0] = 1; |
|||
secidx[0] = 0; |
|||
*scale = 1; |
|||
*mantissa = 0; |
|||
*npub = 0; |
|||
if (*min_value == UINT64_MAX) { |
|||
/* If the minimum value is the maximal representable value, then we cannot code a range. */ |
|||
*exp = -1; |
|||
} |
|||
if (*exp >= 0) { |
|||
int max_bits; |
|||
uint64_t v2; |
|||
if ((*min_value && value > INT64_MAX) || (value && *min_value >= INT64_MAX)) { |
|||
/* If either value or min_value is >= 2^63-1 then the other must be zero to avoid overflowing the proven range. */ |
|||
return 0; |
|||
} |
|||
max_bits = *min_value ? secp256k1_clz64_var(*min_value) : 64; |
|||
if (*min_bits > max_bits) { |
|||
*min_bits = max_bits; |
|||
} |
|||
if (*min_bits > 61 || value > INT64_MAX) { |
|||
/** Ten is not a power of two, so dividing by ten and then representing in base-2 times ten |
|||
* expands the representable range. The verifier requires the proven range is within 0..2**64. |
|||
* For very large numbers (all over 2**63) we must change our exponent to compensate. |
|||
* Rather than handling it precisely, this just disables use of the exponent for big values. |
|||
*/ |
|||
*exp = 0; |
|||
} |
|||
/* Mask off the least significant digits, as requested. */ |
|||
*v = value - *min_value; |
|||
/* If the user has asked for more bits of proof than there is room for in the exponent, reduce the exponent. */ |
|||
v2 = *min_bits ? (UINT64_MAX>>(64-*min_bits)) : 0; |
|||
for (i = 0; i < *exp && (v2 <= UINT64_MAX / 10); i++) { |
|||
*v /= 10; |
|||
v2 *= 10; |
|||
} |
|||
*exp = i; |
|||
v2 = *v; |
|||
for (i = 0; i < *exp; i++) { |
|||
v2 *= 10; |
|||
*scale *= 10; |
|||
} |
|||
/* If the masked number isn't precise, compute the public offset. */ |
|||
*min_value = value - v2; |
|||
/* How many bits do we need to represent our value? */ |
|||
*mantissa = *v ? 64 - secp256k1_clz64_var(*v) : 1; |
|||
if (*min_bits > *mantissa) { |
|||
/* If the user asked for more precision, give it to them. */ |
|||
*mantissa = *min_bits; |
|||
} |
|||
/* Digits in radix-4, except for the last digit if our mantissa length is odd. */ |
|||
*rings = (*mantissa + 1) >> 1; |
|||
for (i = 0; i < *rings; i++) { |
|||
rsizes[i] = ((i < *rings - 1) | (!(*mantissa&1))) ? 4 : 2; |
|||
*npub += rsizes[i]; |
|||
secidx[i] = (*v >> (i*2)) & 3; |
|||
} |
|||
VERIFY_CHECK(*mantissa>0); |
|||
VERIFY_CHECK((*v & ~(UINT64_MAX>>(64-*mantissa))) == 0); /* Did this get all the bits? */ |
|||
} else { |
|||
/* A proof for an exact value. */ |
|||
*exp = 0; |
|||
*min_value = value; |
|||
*v = 0; |
|||
*npub = 2; |
|||
} |
|||
VERIFY_CHECK(*v * *scale + *min_value == value); |
|||
VERIFY_CHECK(*rings > 0); |
|||
VERIFY_CHECK(*rings <= 32); |
|||
VERIFY_CHECK(*npub <= 128); |
|||
return 1; |
|||
} |
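/* Worked example of the parameter selection above (illustrative only):
 * value = 123456 with *min_value = 0, *exp = 2 and *min_bits = 0 requested.
 * Two decimal digits are divided off: *v = 1234 and *scale = 100, and the
 * public offset becomes *min_value = 123456 - 1234*100 = 56. Since 1234
 * needs 11 bits, *mantissa = 11 and *rings = 6 radix-4 digits (five rings
 * of size 4 plus a final ring of size 2 for the odd bit), giving *npub = 22.
 * The resulting proof covers the range [56, 56 + 2047*100]. */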
|||
|
|||
/* Strawman interface: writes the proof into proof (a buffer of *plen bytes) and proves, with respect to min_value, the range for commit, which has the provided blinding factor and value. */ |
|||
SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl(const secp256k1_ecmult_context_t* ecmult_ctx, |
|||
const secp256k1_ecmult_gen_context_t* ecmult_gen_ctx, const secp256k1_pedersen_context_t* pedersen_ctx, |
|||
const secp256k1_rangeproof_context_t* rangeproof_ctx, unsigned char *proof, int *plen, uint64_t min_value, |
|||
const unsigned char *commit, const unsigned char *blind, const unsigned char *nonce, int exp, int min_bits, uint64_t value){ |
|||
secp256k1_gej_t pubs[128]; /* Candidate digits for our proof, most inferred. */ |
|||
secp256k1_scalar_t s[128]; /* Signatures in our proof, most forged. */ |
|||
secp256k1_scalar_t sec[32]; /* Blinding factors for the correct digits. */ |
|||
secp256k1_scalar_t k[32]; /* Nonces for our non-forged signatures. */ |
|||
secp256k1_scalar_t stmp; |
|||
secp256k1_sha256_t sha256_m; |
|||
unsigned char prep[4096]; |
|||
unsigned char tmp[33]; |
|||
unsigned char *signs; /* Location of sign flags in the proof. */ |
|||
uint64_t v; |
|||
uint64_t scale; /* scale = 10^exp. */ |
|||
int mantissa; /* Number of bits proven in the blinded value. */ |
|||
int rings; /* How many digits will our proof cover. */ |
|||
int rsizes[32]; /* How many possible values there are for each place. */ |
|||
int secidx[32]; /* Which digit is the correct one. */ |
|||
int len; /* Number of bytes used so far. */ |
|||
int i; |
|||
int overflow; |
|||
int npub; |
|||
len = 0; |
|||
if (*plen < 65 || min_value > value || min_bits > 64 || min_bits < 0 || exp < -1 || exp > 18) { |
|||
return 0; |
|||
} |
|||
if (!secp256k1_range_proveparams(&v, &rings, rsizes, &npub, secidx, &min_value, &mantissa, &scale, &exp, &min_bits, value)) { |
|||
return 0; |
|||
} |
|||
proof[len] = (rsizes[0] > 1 ? (64 | exp) : 0) | (min_value ? 32 : 0); |
|||
len++; |
|||
if (rsizes[0] > 1) { |
|||
VERIFY_CHECK(mantissa > 0 && mantissa <= 64); |
|||
proof[len] = mantissa - 1; |
|||
len++; |
|||
} |
|||
if (min_value) { |
|||
for (i = 0; i < 8; i++) { |
|||
proof[len + i] = (min_value >> ((7-i) * 8)) & 255; |
|||
} |
|||
len += 8; |
|||
} |
|||
/* Do we have enough room for the proof? */ |
|||
if (*plen - len < 32 * (npub + rings - 1) + 32 + ((rings+6) >> 3)) { |
|||
return 0; |
|||
} |
|||
secp256k1_sha256_initialize(&sha256_m); |
|||
secp256k1_sha256_write(&sha256_m, commit, 33); |
|||
secp256k1_sha256_write(&sha256_m, proof, len); |
|||
|
|||
memset(prep, 0, 4096); |
|||
/* Note, the data corresponding to the blinding factors must be zero. */ |
|||
if (rsizes[rings - 1] > 1) { |
|||
int idx; |
|||
/* Value encoding sidechannel. */ |
|||
idx = rsizes[rings - 1] - 1; |
|||
idx -= secidx[rings - 1] == idx; |
|||
idx = ((rings - 1) * 4 + idx) * 32; |
|||
for (i = 0; i < 8; i++) { |
|||
prep[8 + i + idx] = prep[16 + i + idx] = prep[24 + i + idx] = (v >> (56 - i * 8)) & 255; |
|||
prep[i + idx] = 0; |
|||
} |
|||
prep[idx] = 128; |
|||
} |
|||
if (!secp256k1_rangeproof_genrand(sec, s, prep, rsizes, rings, nonce, commit, proof, len)) { |
|||
return 0; |
|||
} |
|||
memset(prep, 0, 4096); |
|||
for (i = 0; i < rings; i++) { |
|||
/* Sign will overwrite the non-forged signature, move that random value into the nonce. */ |
|||
k[i] = s[i * 4 + secidx[i]]; |
|||
secp256k1_scalar_clear(&s[i * 4 + secidx[i]]); |
|||
} |
|||
/** Genrand returns the last blinding factor as -sum(rest); |
|||
 * adding in the blinding factor for our commitment results in the blinding factor for |
|||
* the commitment to the last digit that the verifier can compute for itself by subtracting |
|||
* all the digits in the proof from the commitment. This lets the prover skip sending the |
|||
* blinded value for one digit. |
|||
*/ |
|||
secp256k1_scalar_set_b32(&stmp, blind, &overflow); |
|||
secp256k1_scalar_add(&sec[rings - 1], &sec[rings - 1], &stmp); |
|||
if (overflow || secp256k1_scalar_is_zero(&sec[rings - 1])) { |
|||
return 0; |
|||
} |
|||
signs = &proof[len]; |
|||
/* We need one sign bit for each blinded value we send. */ |
|||
for (i = 0; i < (rings + 6) >> 3; i++) { |
|||
signs[i] = 0; |
|||
len++; |
|||
} |
|||
npub = 0; |
|||
for (i = 0; i < rings; i++) { |
|||
/*OPT: Use the precomputed gen2 basis?*/ |
|||
secp256k1_pedersen_ecmult(ecmult_gen_ctx, pedersen_ctx, &pubs[npub], &sec[i], ((uint64_t)secidx[i] * scale) << (i*2)); |
|||
if (secp256k1_gej_is_infinity(&pubs[npub])) { |
|||
return 0; |
|||
} |
|||
if (i < rings - 1) { |
|||
int size = 33; |
|||
secp256k1_ge_t c; |
|||
/*OPT: split loop and batch invert.*/ |
|||
secp256k1_ge_set_gej_var(&c, &pubs[npub]); |
|||
if(!secp256k1_eckey_pubkey_serialize(&c, tmp, &size, 1)) { |
|||
return 0; |
|||
} |
|||
secp256k1_sha256_write(&sha256_m, tmp, 33); |
|||
signs[i>>3] |= (tmp[0] == 3) << (i&7); |
|||
memcpy(&proof[len], &tmp[1], 32); |
|||
len += 32; |
|||
} |
|||
npub += rsizes[i]; |
|||
} |
|||
secp256k1_rangeproof_pub_expand(rangeproof_ctx, pubs, exp, rsizes, rings); |
|||
secp256k1_sha256_finalize(&sha256_m, tmp); |
|||
if (!secp256k1_borromean_sign(ecmult_ctx, ecmult_gen_ctx, &proof[len], s, pubs, k, sec, rsizes, secidx, rings, tmp, 32)) { |
|||
return 0; |
|||
} |
|||
len += 32; |
|||
for (i = 0; i < npub; i++) { |
|||
secp256k1_scalar_get_b32(&proof[len],&s[i]); |
|||
len += 32; |
|||
} |
|||
VERIFY_CHECK(len <= *plen); |
|||
*plen = len; |
|||
memset(prep, 0, 4096); |
|||
return 1; |
|||
} |
|||
|
|||
/* Computes blinding factor x given k, s, and the challenge e. */ |
|||
SECP256K1_INLINE static void secp256k1_rangeproof_recover_x(secp256k1_scalar_t *x, const secp256k1_scalar_t *k, const secp256k1_scalar_t *e, |
|||
const secp256k1_scalar_t *s) { |
|||
secp256k1_scalar_t stmp; |
|||
secp256k1_scalar_negate(x, s); |
|||
secp256k1_scalar_add(x, x, k); |
|||
secp256k1_scalar_inverse(&stmp, e); |
|||
secp256k1_scalar_mul(x, x, &stmp); |
|||
} |
|||
|
|||
/* Computes ring's nonce given the blinding factor x, the challenge e, and the signature s. */ |
|||
SECP256K1_INLINE static void secp256k1_rangeproof_recover_k(secp256k1_scalar_t *k, const secp256k1_scalar_t *x, const secp256k1_scalar_t *e, |
|||
const secp256k1_scalar_t *s) { |
|||
secp256k1_scalar_t stmp; |
|||
secp256k1_scalar_mul(&stmp, x, e); |
|||
secp256k1_scalar_add(k, s, &stmp); |
|||
} |
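/* The two helpers above are mutual inverses under the relation s = k - e*x
 * implied by their formulas: recover_x solves it for the blinding factor,
 * x = (k - s) * e^(-1), and recover_k re-derives the nonce, k = s + e*x.
 * Rewinding below uses recover_x once (it costs a scalar inversion) and
 * recover_k for the remaining rings. */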
|||
|
|||
SECP256K1_INLINE static void secp256k1_rangeproof_ch32xor(unsigned char *x, const unsigned char *y) { |
|||
int i; |
|||
for (i = 0; i < 32; i++) { |
|||
x[i] ^= y[i]; |
|||
} |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_rangeproof_rewind_inner(secp256k1_scalar_t *blind, uint64_t *v, |
|||
unsigned char *m, int *mlen, secp256k1_scalar_t *ev, secp256k1_scalar_t *s, |
|||
int *rsizes, int rings, const unsigned char *nonce, const unsigned char *commit, const unsigned char *proof, int len) { |
|||
secp256k1_scalar_t s_orig[128]; |
|||
secp256k1_scalar_t sec[32]; |
|||
secp256k1_scalar_t stmp; |
|||
unsigned char prep[4096]; |
|||
unsigned char tmp[32]; |
|||
uint64_t value; |
|||
int offset; |
|||
int i; |
|||
int j; |
|||
int b; |
|||
int skip1; |
|||
int skip2; |
|||
int npub; |
|||
npub = ((rings - 1) << 2) + rsizes[rings-1]; |
|||
VERIFY_CHECK(npub <= 128); |
|||
VERIFY_CHECK(npub >= 1); |
|||
memset(prep, 0, 4096); |
|||
/* Reconstruct the prover's random values. */ |
|||
secp256k1_rangeproof_genrand(sec, s_orig, prep, rsizes, rings, nonce, commit, proof, len); |
|||
*v = UINT64_MAX; |
|||
secp256k1_scalar_clear(blind); |
|||
if (rings == 1 && rsizes[0] == 1) { |
|||
/* With only a single proof, we can only recover the blinding factor. */ |
|||
secp256k1_rangeproof_recover_x(blind, &s_orig[0], &ev[0], &s[0]); |
|||
if (v) { |
|||
*v = 0; |
|||
} |
|||
if (mlen) { |
|||
*mlen = 0; |
|||
} |
|||
return 1; |
|||
} |
|||
npub = (rings - 1) << 2; |
|||
for (j = 0; j < 2; j++) { |
|||
int idx; |
|||
/* Look for a value encoding in the last ring. */ |
|||
idx = npub + rsizes[rings - 1] - 1 - j; |
|||
secp256k1_scalar_get_b32(tmp, &s[idx]); |
|||
secp256k1_rangeproof_ch32xor(tmp, &prep[idx * 32]); |
|||
if ((tmp[0] & 128) && (memcmp(&tmp[16], &tmp[24], 8) == 0) && (memcmp(&tmp[8], &tmp[16], 8) == 0)) { |
|||
value = 0; |
|||
for (i = 0; i < 8; i++) { |
|||
value = (value << 8) + tmp[24 + i]; |
|||
} |
|||
if (v) { |
|||
*v = value; |
|||
} |
|||
memcpy(&prep[idx * 32], tmp, 32); |
|||
break; |
|||
} |
|||
} |
|||
if (j > 1) { |
|||
/* Couldn't extract a value. */ |
|||
if (mlen) { |
|||
*mlen = 0; |
|||
} |
|||
return 0; |
|||
} |
|||
skip1 = rsizes[rings - 1] - 1 - j; |
|||
skip2 = ((value >> ((rings - 1) << 1)) & 3); |
|||
if (skip1 == skip2) { |
|||
/* Value is in the wrong position. */ |
|||
if (mlen) { |
|||
*mlen = 0; |
|||
} |
|||
return 0; |
|||
} |
|||
skip1 += (rings - 1) << 2; |
|||
skip2 += (rings - 1) << 2; |
|||
/* As in the rsizes[0] == 1 case above, having figured out which s is the one that was not forged, we can recover the blinding factor. */ |
|||
secp256k1_rangeproof_recover_x(&stmp, &s_orig[skip2], &ev[skip2], &s[skip2]); |
|||
secp256k1_scalar_negate(&sec[rings - 1], &sec[rings - 1]); |
|||
secp256k1_scalar_add(blind, &stmp, &sec[rings - 1]); |
|||
if (!m || !mlen || *mlen == 0) { |
|||
if (mlen) { |
|||
*mlen = 0; |
|||
} |
|||
/* FIXME: cleanup in early out/failure cases. */ |
|||
return 1; |
|||
} |
|||
offset = 0; |
|||
npub = 0; |
|||
for (i = 0; i < rings; i++) { |
|||
int idx; |
|||
idx = (value >> (i << 1)) & 3; |
|||
for (j = 0; j < rsizes[i]; j++) { |
|||
if (npub == skip1 || npub == skip2) { |
|||
npub++; |
|||
continue; |
|||
} |
|||
if (idx == j) { |
|||
/** For the non-forged signatures the signature is calculated rather than random; here we recover the prover's nonces. |
|||
 * This could just as well recover the blinding factors, and messages could be put there as is done for recovering the |
|||
* blinding factor in the last ring, but it takes an inversion to recover x so it's faster to put the message data in k. |
|||
*/ |
|||
secp256k1_rangeproof_recover_k(&stmp, &sec[i], &ev[npub], &s[npub]); |
|||
} else { |
|||
stmp = s[npub]; |
|||
} |
|||
secp256k1_scalar_get_b32(tmp, &stmp); |
|||
secp256k1_rangeproof_ch32xor(tmp, &prep[npub * 32]); |
|||
for (b = 0; b < 32 && offset < *mlen; b++) { |
|||
m[offset] = tmp[b]; |
|||
offset++; |
|||
} |
|||
npub++; |
|||
} |
|||
} |
|||
*mlen = offset; |
|||
memset(prep, 0, 4096); |
|||
for (i = 0; i < 128; i++) { |
|||
secp256k1_scalar_clear(&s_orig[i]); |
|||
} |
|||
for (i = 0; i < 32; i++) { |
|||
secp256k1_scalar_clear(&sec[i]); |
|||
} |
|||
secp256k1_scalar_clear(&stmp); |
|||
return 1; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_rangeproof_getheader_impl(int *offset, int *exp, int *mantissa, uint64_t *scale, |
|||
uint64_t *min_value, uint64_t *max_value, const unsigned char *proof, int plen) { |
|||
int i; |
|||
int has_nz_range; |
|||
int has_min; |
|||
if (plen < 65 || ((proof[*offset] & 128) != 0)) { |
|||
return 0; |
|||
} |
|||
has_nz_range = proof[*offset] & 64; |
|||
has_min = proof[*offset] & 32; |
|||
*exp = -1; |
|||
*mantissa = 0; |
|||
if (has_nz_range) { |
|||
*exp = proof[*offset] & 31; |
|||
*offset += 1; |
|||
if (*exp > 18) { |
|||
return 0; |
|||
} |
|||
*mantissa = proof[*offset] + 1; |
|||
if (*mantissa > 64) { |
|||
return 0; |
|||
} |
|||
*max_value = UINT64_MAX>>(64-*mantissa); |
|||
} else { |
|||
*max_value = 0; |
|||
} |
|||
*offset += 1; |
|||
*scale = 1; |
|||
for (i = 0; i < *exp; i++) { |
|||
if (*max_value > UINT64_MAX / 10) { |
|||
return 0; |
|||
} |
|||
*max_value *= 10; |
|||
*scale *= 10; |
|||
} |
|||
*min_value = 0; |
|||
if (has_min) { |
|||
if(plen - *offset < 8) { |
|||
return 0; |
|||
} |
|||
/*FIXME: Compact minvalue encoding?*/ |
|||
for (i = 0; i < 8; i++) { |
|||
*min_value = (*min_value << 8) | proof[*offset + i]; |
|||
} |
|||
*offset += 8; |
|||
} |
|||
if (*max_value > UINT64_MAX - *min_value) { |
|||
return 0; |
|||
} |
|||
*max_value += *min_value; |
|||
return 1; |
|||
} |
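/* Summary of the header layout parsed above (descriptive, not normative):
 *   byte 0: bit 7 must be clear; bit 6 set means a non-zero range follows
 *           (exp in the low 5 bits, then a mantissa byte); bit 5 set means
 *           an explicit 8-byte big-endian minimum value follows.
 *   byte 1: mantissa - 1 (present only when bit 6 is set).
 * A hedged stand-alone decode sketch mirroring that logic (hypothetical
 * helper, no context object needed):
 *
 *     int off = 0, exp = -1, mantissa = 0;
 *     uint64_t minv = 0;
 *     if (proof[0] & 64) { exp = proof[0] & 31; mantissa = proof[1] + 1; off = 2; } else { off = 1; }
 *     if (proof[0] & 32) { int i; for (i = 0; i < 8; i++) { minv = (minv << 8) | proof[off + i]; } off += 8; }
 */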
|||
|
|||
/* Verifies a range proof of length plen for the 33-byte commit; the min/max values proven are written to the min/max arguments. Returns 0 on failure, 1 on success. */ |
|||
SECP256K1_INLINE static int secp256k1_rangeproof_verify_impl(const secp256k1_ecmult_context_t* ecmult_ctx, |
|||
const secp256k1_ecmult_gen_context_t* ecmult_gen_ctx, |
|||
const secp256k1_pedersen_context_t* pedersen_ctx, const secp256k1_rangeproof_context_t* rangeproof_ctx, |
|||
unsigned char *blindout, uint64_t *value_out, unsigned char *message_out, int *outlen, const unsigned char *nonce, |
|||
uint64_t *min_value, uint64_t *max_value, const unsigned char *commit, const unsigned char *proof, int plen) { |
|||
secp256k1_gej_t accj; |
|||
secp256k1_gej_t pubs[128]; |
|||
secp256k1_ge_t c; |
|||
secp256k1_scalar_t s[128]; |
|||
secp256k1_scalar_t evalues[128]; /* Challenges, only used during proof rewind. */ |
|||
secp256k1_sha256_t sha256_m; |
|||
int rsizes[32]; |
|||
int ret; |
|||
int i; |
|||
int exp; |
|||
int mantissa; |
|||
int offset; |
|||
int rings; |
|||
int overflow; |
|||
int npub; |
|||
int offset_post_header; |
|||
uint64_t scale; |
|||
unsigned char signs[31]; |
|||
unsigned char m[33]; |
|||
const unsigned char *e0; |
|||
offset = 0; |
|||
if (!secp256k1_rangeproof_getheader_impl(&offset, &exp, &mantissa, &scale, min_value, max_value, proof, plen)) { |
|||
return 0; |
|||
} |
|||
offset_post_header = offset; |
|||
rings = 1; |
|||
rsizes[0] = 1; |
|||
npub = 1; |
|||
if (mantissa != 0) { |
|||
rings = (mantissa >> 1); |
|||
for (i = 0; i < rings; i++) { |
|||
rsizes[i] = 4; |
|||
} |
|||
npub = (mantissa >> 1) << 2; |
|||
if (mantissa & 1) { |
|||
rsizes[rings] = 2; |
|||
npub += rsizes[rings]; |
|||
rings++; |
|||
} |
|||
} |
|||
VERIFY_CHECK(rings <= 32); |
|||
if (plen - offset < 32 * (npub + rings - 1) + 32 + ((rings+6) >> 3)) { |
|||
return 0; |
|||
} |
|||
secp256k1_sha256_initialize(&sha256_m); |
|||
secp256k1_sha256_write(&sha256_m, commit, 33); |
|||
secp256k1_sha256_write(&sha256_m, proof, offset); |
|||
for(i = 0; i < rings - 1; i++) { |
|||
signs[i] = (proof[offset + ( i>> 3)] & (1 << (i & 7))) != 0; |
|||
} |
|||
offset += (rings + 6) >> 3; |
|||
if ((rings - 1) & 7) { |
|||
/* The number of coded blinded points is not a multiple of 8; force the extra sign bits to 0 to reject mutation. */ |
|||
if ((proof[offset - 1] >> ((rings - 1) & 7)) != 0) { |
|||
return 0; |
|||
} |
|||
} |
|||
npub = 0; |
|||
secp256k1_gej_set_infinity(&accj); |
|||
if (*min_value) { |
|||
secp256k1_pedersen_ecmult_small(pedersen_ctx, &accj, *min_value); |
|||
} |
|||
for(i = 0; i < rings - 1; i++) { |
|||
memcpy(&m[1], &proof[offset], 32); |
|||
m[0] = 2 + signs[i]; |
|||
if (!secp256k1_eckey_pubkey_parse(&c, m, 33)) { |
|||
return 0; |
|||
} |
|||
secp256k1_sha256_write(&sha256_m, m, 33); |
|||
secp256k1_gej_set_ge(&pubs[npub], &c); |
|||
secp256k1_gej_add_ge_var(&accj, &accj, &c, NULL); |
|||
offset += 32; |
|||
npub += rsizes[i]; |
|||
} |
|||
secp256k1_gej_neg(&accj, &accj); |
|||
if (!secp256k1_eckey_pubkey_parse(&c, commit, 33)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_add_ge_var(&pubs[npub], &accj, &c, NULL); |
|||
if (secp256k1_gej_is_infinity(&pubs[npub])) { |
|||
return 0; |
|||
} |
|||
secp256k1_rangeproof_pub_expand(rangeproof_ctx, pubs, exp, rsizes, rings); |
|||
npub += rsizes[rings - 1]; |
|||
e0 = &proof[offset]; |
|||
offset += 32; |
|||
for (i = 0; i < npub; i++) { |
|||
secp256k1_scalar_set_b32(&s[i], &proof[offset], &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
offset += 32; |
|||
} |
|||
if (offset != plen) { |
|||
/*Extra data found, reject.*/ |
|||
return 0; |
|||
} |
|||
secp256k1_sha256_finalize(&sha256_m, m); |
|||
ret = secp256k1_borromean_verify(ecmult_ctx, nonce ? evalues : NULL, e0, s, pubs, rsizes, rings, m, 32); |
|||
if (ret && nonce) { |
|||
/* Given the nonce, try rewinding the witness to recover its initial state. */ |
|||
secp256k1_scalar_t blind; |
|||
unsigned char commitrec[33]; |
|||
uint64_t vv; |
|||
if (!ecmult_gen_ctx) { |
|||
return 0; |
|||
} |
|||
if (!secp256k1_rangeproof_rewind_inner(&blind, &vv, message_out, outlen, evalues, s, rsizes, rings, nonce, commit, proof, offset_post_header)) { |
|||
return 0; |
|||
} |
|||
/* Unwind apparently successful, see if the commitment can be reconstructed. */ |
|||
/* FIXME: should check vv is in the mantissa's range. */ |
|||
vv = (vv * scale) + *min_value; |
|||
secp256k1_pedersen_ecmult(ecmult_gen_ctx, pedersen_ctx, &accj, &blind, vv); |
|||
if (secp256k1_gej_is_infinity(&accj)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej(&c, &accj); |
|||
i = 33; |
|||
secp256k1_eckey_pubkey_serialize(&c, commitrec, &i, 1); |
|||
if (memcmp(commitrec, commit, 33) != 0) { |
|||
return 0; |
|||
} |
|||
if (blindout) { |
|||
secp256k1_scalar_get_b32(blindout, &blind); |
|||
} |
|||
if (value_out) { |
|||
*value_out = vv; |
|||
} |
|||
} |
|||
return ret; |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,279 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2015 Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef SECP256K1_MODULE_RANGEPROOF_TESTS |
|||
#define SECP256K1_MODULE_RANGEPROOF_TESTS |
|||
|
|||
void test_pedersen(void) { |
|||
unsigned char commits[33*19]; |
|||
const unsigned char *cptr[19]; |
|||
unsigned char blinds[32*19]; |
|||
const unsigned char *bptr[19]; |
|||
secp256k1_scalar_t s; |
|||
uint64_t values[19]; |
|||
int64_t totalv; |
|||
int i; |
|||
int inputs; |
|||
int outputs; |
|||
int total; |
|||
inputs = (secp256k1_rand32() & 7) + 1; |
|||
outputs = (secp256k1_rand32() & 7) + 2; |
|||
total = inputs + outputs; |
|||
for (i = 0; i < 19; i++) { |
|||
cptr[i] = &commits[i * 33]; |
|||
bptr[i] = &blinds[i * 32]; |
|||
} |
|||
totalv = 0; |
|||
for (i = 0; i < inputs; i++) { |
|||
values[i] = secp256k1_rands64(0, INT64_MAX - totalv); |
|||
totalv += values[i]; |
|||
} |
|||
if (secp256k1_rand32() & 1) { |
|||
for (i = 0; i < outputs; i++) { |
|||
int64_t max = INT64_MAX; |
|||
if (totalv < 0) { |
|||
max += totalv; |
|||
} |
|||
values[i + inputs] = secp256k1_rands64(0, max); |
|||
totalv -= values[i + inputs]; |
|||
} |
|||
} else { |
|||
for (i = 0; i < outputs - 1; i++) { |
|||
values[i + inputs] = secp256k1_rands64(0, totalv); |
|||
totalv -= values[i + inputs]; |
|||
} |
|||
values[total - 1] = totalv >> (secp256k1_rand32() & 1); |
|||
totalv -= values[total - 1]; |
|||
} |
|||
for (i = 0; i < total - 1; i++) { |
|||
random_scalar_order(&s); |
|||
secp256k1_scalar_get_b32(&blinds[i * 32], &s); |
|||
} |
|||
CHECK(secp256k1_pedersen_blind_sum(ctx, &blinds[(total - 1) * 32], bptr, total - 1, inputs)); |
|||
for (i = 0; i < total; i++) { |
|||
CHECK(secp256k1_pedersen_commit(ctx, &commits[i * 33], &blinds[i * 32], values[i])); |
|||
} |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, cptr, inputs, &cptr[inputs], outputs, totalv)); |
|||
CHECK(!secp256k1_pedersen_verify_tally(ctx, cptr, inputs, &cptr[inputs], outputs, totalv + 1)); |
|||
random_scalar_order(&s); |
|||
for (i = 0; i < 4; i++) { |
|||
secp256k1_scalar_get_b32(&blinds[i * 32], &s); |
|||
} |
|||
values[0] = INT64_MAX; |
|||
values[1] = 0; |
|||
values[2] = 1; |
|||
for (i = 0; i < 3; i++) { |
|||
CHECK(secp256k1_pedersen_commit(ctx, &commits[i * 33], &blinds[i * 32], values[i])); |
|||
} |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[1], 1, &cptr[2], 1, -1)); |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[2], 1, &cptr[1], 1, 1)); |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[0], 1, &cptr[0], 1, 0)); |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[0], 1, &cptr[1], 1, INT64_MAX)); |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[1], 1, &cptr[1], 1, 0)); |
|||
CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[1], 1, &cptr[0], 1, -INT64_MAX)); |
|||
} |
|||
|
|||
void test_borromean(void) { |
|||
unsigned char e0[32]; |
|||
secp256k1_scalar_t s[64]; |
|||
secp256k1_gej_t pubs[64]; |
|||
secp256k1_scalar_t k[8]; |
|||
secp256k1_scalar_t sec[8]; |
|||
secp256k1_ge_t ge; |
|||
secp256k1_scalar_t one; |
|||
unsigned char m[32]; |
|||
int rsizes[8]; |
|||
int secidx[8]; |
|||
int nrings; |
|||
int i; |
|||
int j; |
|||
int c; |
|||
secp256k1_rand256_test(m); |
|||
nrings = 1 + (secp256k1_rand32()&7); |
|||
c = 0; |
|||
secp256k1_scalar_set_int(&one, 1); |
|||
if (secp256k1_rand32()&1) { |
|||
secp256k1_scalar_negate(&one, &one); |
|||
} |
|||
for (i = 0; i < nrings; i++) { |
|||
rsizes[i] = 1 + (secp256k1_rand32()&7); |
|||
secidx[i] = secp256k1_rand32() % rsizes[i]; |
|||
random_scalar_order(&sec[i]); |
|||
random_scalar_order(&k[i]); |
|||
if(secp256k1_rand32()&7) { |
|||
sec[i] = one; |
|||
} |
|||
if(secp256k1_rand32()&7) { |
|||
k[i] = one; |
|||
} |
|||
for (j = 0; j < rsizes[i]; j++) { |
|||
random_scalar_order(&s[c + j]); |
|||
if(secp256k1_rand32()&7) { |
|||
s[c + j] = one; |
|||
} |
|||
if (j == secidx[i]) { |
|||
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubs[c + j], &sec[i]); |
|||
} else { |
|||
random_group_element_test(&ge); |
|||
random_group_element_jacobian_test(&pubs[c + j],&ge); |
|||
} |
|||
} |
|||
c += rsizes[i]; |
|||
} |
|||
CHECK(secp256k1_borromean_sign(&ctx->ecmult_ctx, &ctx->ecmult_gen_ctx, e0, s, pubs, k, sec, rsizes, secidx, nrings, m, 32)); |
|||
CHECK(secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32)); |
|||
i = secp256k1_rand32() % c; |
|||
secp256k1_scalar_negate(&s[i],&s[i]); |
|||
CHECK(!secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32)); |
|||
secp256k1_scalar_negate(&s[i],&s[i]); |
|||
secp256k1_scalar_set_int(&one, 1); |
|||
for(j = 0; j < 4; j++) { |
|||
i = secp256k1_rand32() % c; |
|||
if (secp256k1_rand32() & 1) { |
|||
secp256k1_gej_double_var(&pubs[i],&pubs[i], NULL); |
|||
} else { |
|||
secp256k1_scalar_add(&s[i],&s[i],&one); |
|||
} |
|||
CHECK(!secp256k1_borromean_verify(&ctx->ecmult_ctx, NULL, e0, s, pubs, rsizes, nrings, m, 32)); |
|||
} |
|||
} |
|||
|
|||
void test_rangeproof(void) { |
|||
const uint64_t testvs[11] = {0, 1, 5, 11, 65535, 65537, INT32_MAX, UINT32_MAX, INT64_MAX - 1, INT64_MAX, UINT64_MAX}; |
|||
unsigned char commit[33]; |
|||
unsigned char commit2[33]; |
|||
unsigned char proof[5134]; |
|||
unsigned char blind[32]; |
|||
unsigned char blindout[32]; |
|||
unsigned char message[4096]; |
|||
int mlen; |
|||
uint64_t v; |
|||
uint64_t vout; |
|||
uint64_t vmin; |
|||
uint64_t minv; |
|||
uint64_t maxv; |
|||
int len; |
|||
int i; |
|||
int j; |
|||
int k; |
|||
secp256k1_rand256(blind); |
|||
for (i = 0; i < 11; i++) { |
|||
v = testvs[i]; |
|||
CHECK(secp256k1_pedersen_commit(ctx, commit, blind, v)); |
|||
for (vmin = 0; vmin < (i<9 && i > 0 ? 2 : 1); vmin++) { |
|||
len = 5134; |
|||
CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, vmin, commit, blind, commit, 0, 0, v)); |
|||
CHECK(len <= 5134); |
|||
mlen = 4096; |
|||
CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, message, &mlen, commit, &minv, &maxv, commit, proof, len)); |
|||
for (j = 0; j < mlen; j++) { |
|||
CHECK(message[j] == 0); |
|||
} |
|||
CHECK(mlen <= 4096); |
|||
CHECK(memcmp(blindout, blind, 32) == 0); |
|||
CHECK(vout == v); |
|||
CHECK(minv <= v); |
|||
CHECK(maxv >= v); |
|||
len = 5134; |
|||
CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, v, commit, blind, commit, -1, 64, v)); |
|||
CHECK(len <= 73); |
|||
CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit, &minv, &maxv, commit, proof, len)); |
|||
CHECK(memcmp(blindout, blind, 32) == 0); |
|||
CHECK(vout == v); |
|||
CHECK(minv == v); |
|||
CHECK(maxv == v); |
|||
} |
|||
} |
|||
secp256k1_rand256(blind); |
|||
v = INT64_MAX - 1; |
|||
CHECK(secp256k1_pedersen_commit(ctx, commit, blind, v)); |
|||
for (i = 0; i < 19; i++) { |
|||
len = 5134; |
|||
CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, 0, commit, blind, commit, i, 0, v)); |
|||
CHECK(secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit, proof, len)); |
|||
CHECK(len <= 5134); |
|||
CHECK(minv <= v); |
|||
CHECK(maxv >= v); |
|||
} |
|||
secp256k1_rand256(blind); |
|||
{ |
|||
/*Malleability test.*/ |
|||
v = secp256k1_rands64(0, 255); |
|||
CHECK(secp256k1_pedersen_commit(ctx, commit, blind, v)); |
|||
len = 5134; |
|||
CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, 0, commit, blind, commit, 0, 3, v)); |
|||
CHECK(len <= 5134); |
|||
for (i = 0; i < len*8; i++) { |
|||
proof[i >> 3] ^= 1 << (i & 7); |
|||
CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit, proof, len)); |
|||
proof[i >> 3] ^= 1 << (i & 7); |
|||
} |
|||
CHECK(secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit, proof, len)); |
|||
CHECK(minv <= v); |
|||
CHECK(maxv >= v); |
|||
} |
|||
memcpy(commit2, commit, 33); |
|||
for (i = 0; i < 10 * count; i++) { |
|||
int exp; |
|||
int min_bits; |
|||
v = secp256k1_rands64(0, UINT64_MAX >> (secp256k1_rand32()&63)); |
|||
vmin = 0; |
|||
if ((v < INT64_MAX) && (secp256k1_rand32()&1)) { |
|||
vmin = secp256k1_rands64(0, v); |
|||
} |
|||
secp256k1_rand256(blind); |
|||
CHECK(secp256k1_pedersen_commit(ctx, commit, blind, v)); |
|||
len = 5134; |
|||
exp = (int)secp256k1_rands64(0,18)-(int)secp256k1_rands64(0,18); |
|||
if (exp < 0) { |
|||
exp = -exp; |
|||
} |
|||
min_bits = (int)secp256k1_rands64(0,64)-(int)secp256k1_rands64(0,64); |
|||
if (min_bits < 0) { |
|||
min_bits = -min_bits; |
|||
} |
|||
CHECK(secp256k1_rangeproof_sign(ctx, proof, &len, vmin, commit, blind, commit, exp, min_bits, v)); |
|||
CHECK(len <= 5134); |
|||
mlen = 4096; |
|||
CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, message, &mlen, commit, &minv, &maxv, commit, proof, len)); |
|||
for (j = 0; j < mlen; j++) { |
|||
CHECK(message[j] == 0); |
|||
} |
|||
CHECK(mlen <= 4096); |
|||
CHECK(memcmp(blindout, blind, 32) == 0); |
|||
CHECK(vout == v); |
|||
CHECK(minv <= v); |
|||
CHECK(maxv >= v); |
|||
CHECK(secp256k1_rangeproof_rewind(ctx, blindout, &vout, NULL, NULL, commit, &minv, &maxv, commit, proof, len)); |
|||
memcpy(commit2, commit, 33); |
|||
} |
|||
for (j = 0; j < 10; j++) { |
|||
for (i = 0; i < 96; i++) { |
|||
secp256k1_rand256(&proof[i * 32]); |
|||
} |
|||
for (k = 0; k < 128; k++) { |
|||
len = k; |
|||
CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit2, proof, len)); |
|||
} |
|||
len = secp256k1_rands64(0, 3072); |
|||
CHECK(!secp256k1_rangeproof_verify(ctx, &minv, &maxv, commit2, proof, len)); |
|||
} |
|||
} |
|||
|
|||
void run_rangeproof_tests(void) { |
|||
int i; |
|||
secp256k1_pedersen_context_initialize(ctx); |
|||
secp256k1_rangeproof_context_initialize(ctx); |
|||
for (i = 0; i < 10*count; i++) { |
|||
test_pedersen(); |
|||
} |
|||
for (i = 0; i < 10*count; i++) { |
|||
test_borromean(); |
|||
} |
|||
test_rangeproof(); |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,11 @@ |
|||
include_HEADERS += include/secp256k1_schnorr.h |
|||
noinst_HEADERS += src/modules/schnorr/main_impl.h |
|||
noinst_HEADERS += src/modules/schnorr/schnorr.h |
|||
noinst_HEADERS += src/modules/schnorr/schnorr_impl.h |
|||
noinst_HEADERS += src/modules/schnorr/tests_impl.h |
|||
if USE_BENCHMARK |
|||
noinst_PROGRAMS += bench_schnorr_verify |
|||
bench_schnorr_verify_SOURCES = src/bench_schnorr_verify.c |
|||
bench_schnorr_verify_LDADD = libsecp256k1.la $(SECP_LIBS) |
|||
bench_schnorr_verify_LDFLAGS = -static |
|||
endif |
@ -0,0 +1,163 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2014-2015 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef SECP256K1_MODULE_SCHNORR_MAIN |
|||
#define SECP256K1_MODULE_SCHNORR_MAIN |
|||
|
|||
#include "modules/schnorr/schnorr_impl.h" |
|||
|
|||
static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { |
|||
secp256k1_sha256_t sha; |
|||
secp256k1_sha256_initialize(&sha); |
|||
secp256k1_sha256_write(&sha, r32, 32); |
|||
secp256k1_sha256_write(&sha, msg32, 32); |
|||
secp256k1_sha256_finalize(&sha, h32); |
|||
} |
|||
|
|||
static const unsigned char secp256k1_schnorr_algo16[16] = "Schnorr+SHA256 "; |
|||
|
|||
int secp256k1_schnorr_sign(const secp256k1_context_t* ctx, const unsigned char *msg32, unsigned char *sig64, const unsigned char *seckey, secp256k1_nonce_function_t noncefp, const void* noncedata) { |
|||
secp256k1_scalar_t sec, non; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
unsigned int count = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sig64 != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
if (noncefp == NULL) { |
|||
noncefp = secp256k1_nonce_function_default; |
|||
} |
|||
|
|||
secp256k1_scalar_set_b32(&sec, seckey, NULL); |
|||
while (1) { |
|||
unsigned char nonce32[32]; |
|||
ret = noncefp(nonce32, msg32, seckey, secp256k1_schnorr_algo16, count, noncedata); |
|||
if (!ret) { |
|||
break; |
|||
} |
|||
secp256k1_scalar_set_b32(&non, nonce32, &overflow); |
|||
memset(nonce32, 0, 32); |
|||
if (!secp256k1_scalar_is_zero(&non) && !overflow) { |
|||
if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, NULL, secp256k1_schnorr_msghash_sha256, msg32)) { |
|||
break; |
|||
} |
|||
} |
|||
count++; |
|||
} |
|||
if (!ret) { |
|||
memset(sig64, 0, 64); |
|||
} |
|||
secp256k1_scalar_clear(&non); |
|||
secp256k1_scalar_clear(&sec); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_schnorr_verify(const secp256k1_context_t* ctx, const unsigned char *msg32, const unsigned char *sig64, const secp256k1_pubkey_t *pubkey) { |
|||
secp256k1_ge_t q; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sig64 != NULL); |
|||
ARG_CHECK(pubkey != NULL); |
|||
|
|||
secp256k1_pubkey_load(ctx, &q, pubkey); |
|||
return secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32); |
|||
} |
|||
|
|||
int secp256k1_schnorr_recover(const secp256k1_context_t* ctx, const unsigned char *msg32, const unsigned char *sig64, secp256k1_pubkey_t *pubkey) { |
|||
secp256k1_ge_t q; |
|||
|
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sig64 != NULL); |
|||
ARG_CHECK(pubkey != NULL); |
|||
|
|||
if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32)) { |
|||
secp256k1_pubkey_save(pubkey, &q); |
|||
return 1; |
|||
} else { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
return 0; |
|||
} |
|||
} |
|||
|
|||
int secp256k1_schnorr_generate_nonce_pair(const secp256k1_context_t* ctx, const unsigned char *msg32, const unsigned char *sec32, secp256k1_nonce_function_t noncefp, const void* noncedata, secp256k1_pubkey_t *pubnonce, unsigned char *privnonce32) { |
|||
int count = 0; |
|||
int ret = 1; |
|||
secp256k1_gej_t Qj; |
|||
secp256k1_ge_t Q; |
|||
secp256k1_scalar_t sec; |
|||
|
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sec32 != NULL); |
|||
ARG_CHECK(pubnonce != NULL); |
|||
ARG_CHECK(privnonce32 != NULL); |
|||
|
|||
if (noncefp == NULL) { |
|||
noncefp = secp256k1_nonce_function_default; |
|||
} |
|||
|
|||
do { |
|||
int overflow; |
|||
ret = noncefp(privnonce32, msg32, sec32, secp256k1_schnorr_algo16, count++, noncedata); |
|||
if (!ret) { |
|||
break; |
|||
} |
|||
secp256k1_scalar_set_b32(&sec, privnonce32, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&sec)) { |
|||
continue; |
|||
} |
|||
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sec); |
|||
secp256k1_ge_set_gej(&Q, &Qj); |
|||
|
|||
secp256k1_pubkey_save(pubnonce, &Q); |
|||
break; |
|||
} while(1); |
|||
|
|||
secp256k1_scalar_clear(&sec); |
|||
if (!ret) { |
|||
memset(pubnonce, 0, sizeof(*pubnonce)); |
|||
} |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_schnorr_partial_sign(const secp256k1_context_t* ctx, const unsigned char *msg32, unsigned char *sig64, const unsigned char *sec32, const unsigned char *secnonce32, const secp256k1_pubkey_t *pubnonce_others) { |
|||
int overflow = 0; |
|||
secp256k1_scalar_t sec, non; |
|||
secp256k1_ge_t pubnon; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sig64 != NULL); |
|||
ARG_CHECK(sec32 != NULL); |
|||
ARG_CHECK(secnonce32 != NULL); |
|||
ARG_CHECK(pubnonce_others != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&sec, sec32, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&sec)) { |
|||
return -1; |
|||
} |
|||
secp256k1_scalar_set_b32(&non, secnonce32, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&non)) { |
|||
return -1; |
|||
} |
|||
secp256k1_pubkey_load(ctx, &pubnon, pubnonce_others); |
|||
return secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, &pubnon, secp256k1_schnorr_msghash_sha256, msg32); |
|||
} |
|||
|
|||
int secp256k1_schnorr_partial_combine(const secp256k1_context_t* ctx, unsigned char *sig64, int n, const unsigned char * const *sig64sin) { |
|||
ARG_CHECK(sig64 != NULL); |
|||
ARG_CHECK(n >= 1); |
|||
ARG_CHECK(sig64sin != NULL); |
|||
return secp256k1_schnorr_sig_combine(sig64, n, sig64sin); |
|||
} |
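/* One plausible two-party flow built from the functions above (a hedged
 * sketch, not a specification; error handling omitted, names hypothetical):
 *
 *     unsigned char n1[32], n2[32], part1[64], part2[64], sig[64];
 *     secp256k1_pubkey_t R1, R2;
 *     const unsigned char *parts[2] = { part1, part2 };
 *     // 1. each signer derives a nonce pair for the same 32-byte msg
 *     secp256k1_schnorr_generate_nonce_pair(ctx, msg, key1, NULL, NULL, &R1, n1);
 *     secp256k1_schnorr_generate_nonce_pair(ctx, msg, key2, NULL, NULL, &R2, n2);
 *     // 2. after exchanging public nonces, each signs against the other's nonce
 *     secp256k1_schnorr_partial_sign(ctx, msg, part1, key1, n1, &R2);
 *     secp256k1_schnorr_partial_sign(ctx, msg, part2, key2, n2, &R1);
 *     // 3. anyone may combine the partial signatures into one signature,
 *     //    which should verify against the combined public key
 *     secp256k1_schnorr_partial_combine(ctx, sig, 2, parts);
 */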
|||
|
|||
#endif |
@ -0,0 +1,20 @@ |
|||
/*********************************************************************** |
|||
* Copyright (c) 2014-2015 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php. * |
|||
***********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCHNORR_ |
|||
#define _SECP256K1_SCHNORR_ |
|||
|
|||
#include "scalar.h" |
|||
#include "group.h" |
|||
|
|||
typedef void (*secp256k1_schnorr_msghash_t)(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32); |
|||
|
|||
static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context_t* ctx, unsigned char *sig64, const secp256k1_scalar_t *key, const secp256k1_scalar_t *nonce, const secp256k1_ge_t *pubnonce, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32); |
|||
static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context_t* ctx, const unsigned char *sig64, const secp256k1_ge_t *pubkey, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32); |
|||
static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context_t* ctx, const unsigned char *sig64, secp256k1_ge_t *pubkey, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32); |
|||
static int secp256k1_schnorr_sig_combine(unsigned char *sig64, int n, const unsigned char * const *sig64ins); |
|||
|
|||
#endif |
@ -0,0 +1,207 @@ |
|||
/*********************************************************************** |
|||
* Copyright (c) 2014-2015 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
 * file COPYING or http://www.opensource.org/licenses/mit-license.php. * |
|||
***********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCHNORR_IMPL_H_ |
|||
#define _SECP256K1_SCHNORR_IMPL_H_ |
|||
|
|||
#include <string.h> |
|||
|
|||
#include "schnorr.h" |
|||
#include "num.h" |
|||
#include "field.h" |
|||
#include "group.h" |
|||
#include "ecmult.h" |
|||
#include "ecmult_gen.h" |
|||
|
|||
/** |
|||
 * Custom Schnorr-based signature scheme. It supports multiparty signing, public key |
|||
* recovery and batch validation. |
|||
* |
|||
* Rationale for verifying R's y coordinate: |
|||
* In order to support batch validation and public key recovery, the full R point must |
|||
* be known to verifiers, rather than just its x coordinate. In order to not risk |
|||
* being more strict in batch validation than normal validation, validators must be |
|||
* required to reject signatures with incorrect y coordinate. This is only possible |
|||
* by including a (relatively slow) field inverse, or a field square root. However, |
|||
* batch validation offers potentially much higher benefits than this cost. |
|||
* |
|||
* Rationale for having an implicit y coordinate oddness: |
|||
* If we commit to having the full R point known to verifiers, there are two mechanism. |
|||
* Either include its oddness in the signature, or give it an implicit fixed value. |
|||
* As the R y coordinate can be flipped by a simple negation of the nonce, we choose the |
|||
* latter, as it comes with nearly zero impact on signing or validation performance, and |
|||
* saves a byte in the signature. |
|||
* |
|||
* Signing: |
|||
* Inputs: 32-byte message m, 32-byte scalar key x (!=0), 32-byte scalar nonce k (!=0) |
|||
* |
|||
* Compute point R = k * G. Reject nonce if R's y coordinate is odd (or negate nonce). |
|||
* Compute 32-byte r, the serialization of R's x coordinate. |
|||
* Compute scalar h = Hash(r || m). Reject nonce if h == 0 or h >= order. |
|||
* Compute scalar s = k - h * x. |
|||
* The signature is (r, s). |
|||
* |
|||
* |
|||
* Verification: |
|||
* Inputs: 32-byte message m, public key point Q, signature: (32-byte r, scalar s) |
|||
* |
|||
* Signature is invalid if s >= order. |
|||
* Signature is invalid if r >= p. |
|||
* Compute scalar h = Hash(r || m). Signature is invalid if h == 0 or h >= order. |
|||
* Option 1 (faster for single verification): |
|||
* Compute point R = h * Q + s * G. Signature is invalid if R is infinity or R's y coordinate is odd. |
|||
* Signature is valid if the serialization of R's x coordinate equals r. |
|||
* Option 2 (allows batch validation and pubkey recovery): |
|||
* Decompress x coordinate r into point R, with odd y coordinate. Fail if R is not on the curve. |
|||
* Signature is valid if R + h * Q + s * G == 0. |
|||
*/ |
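/* A quick consistency check of the two verification options above: with Q = x*G and
 * s = k - h*x, Option 1 recomputes h*Q + s*G = h*x*G + (k - h*x)*G = k*G = R, so its x
 * coordinate matches r exactly when the signature was produced as described. For Option 2,
 * the point decompressed from r with odd y is -R, and -R + h*Q + s*G is then infinity. */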
|||
|
|||
static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context_t* ctx, unsigned char *sig64, const secp256k1_scalar_t *key, const secp256k1_scalar_t *nonce, const secp256k1_ge_t *pubnonce, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32) { |
|||
secp256k1_gej_t Rj; |
|||
secp256k1_ge_t Ra; |
|||
unsigned char h32[32]; |
|||
secp256k1_scalar_t h, s; |
|||
int overflow; |
|||
secp256k1_scalar_t n; |
|||
|
|||
if (secp256k1_scalar_is_zero(key) || secp256k1_scalar_is_zero(nonce)) { |
|||
return 0; |
|||
} |
|||
n = *nonce; |
|||
|
|||
secp256k1_ecmult_gen(ctx, &Rj, &n); |
|||
if (pubnonce) { |
|||
secp256k1_gej_add_ge(&Rj, &Rj, pubnonce); |
|||
} |
|||
secp256k1_ge_set_gej(&Ra, &Rj); |
|||
secp256k1_fe_normalize(&Ra.y); |
|||
if (secp256k1_fe_is_odd(&Ra.y)) { |
|||
/* R's y coordinate is odd, which is not allowed (see rationale above).
|
|||
Force it to be even by negating the nonce. Note that this even works |
|||
for multiparty signing, as the R point is known to all participants, |
|||
who can all decide to flip the sign in unison, resulting in the |
|||
overall R point being negated too. */ |
|||
secp256k1_scalar_negate(&n, &n); |
|||
} |
|||
secp256k1_fe_normalize(&Ra.x); |
|||
secp256k1_fe_get_b32(sig64, &Ra.x); |
|||
hash(h32, sig64, msg32); |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(&h, h32, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&h)) { |
|||
secp256k1_scalar_clear(&n); |
|||
return 0; |
|||
} |
|||
secp256k1_scalar_mul(&s, &h, key); |
|||
secp256k1_scalar_negate(&s, &s); |
|||
secp256k1_scalar_add(&s, &s, &n); |
|||
secp256k1_scalar_clear(&n); |
|||
secp256k1_scalar_get_b32(sig64 + 32, &s); |
|||
return 1; |
|||
} |
|||
|
|||
static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context_t* ctx, const unsigned char *sig64, const secp256k1_ge_t *pubkey, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32) { |
|||
secp256k1_gej_t Qj, Rj; |
|||
secp256k1_ge_t Ra; |
|||
secp256k1_fe_t Rx; |
|||
secp256k1_scalar_t h, s; |
|||
unsigned char hh[32]; |
|||
int overflow; |
|||
|
|||
if (secp256k1_ge_is_infinity(pubkey)) { |
|||
return 0; |
|||
} |
|||
hash(hh, sig64, msg32); |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(&h, hh, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&h)) { |
|||
return 0; |
|||
} |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
if (!secp256k1_fe_set_b32(&Rx, sig64)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_set_ge(&Qj, pubkey); |
|||
secp256k1_ecmult(ctx, &Rj, &Qj, &h, &s); |
|||
if (secp256k1_gej_is_infinity(&Rj)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej_var(&Ra, &Rj); |
|||
secp256k1_fe_normalize_var(&Ra.y); |
|||
if (secp256k1_fe_is_odd(&Ra.y)) { |
|||
return 0; |
|||
} |
|||
return secp256k1_fe_equal_var(&Rx, &Ra.x); |
|||
} |
|||
|
|||
static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context_t* ctx, const unsigned char *sig64, secp256k1_ge_t *pubkey, secp256k1_schnorr_msghash_t hash, const unsigned char *msg32) { |
|||
secp256k1_gej_t Qj, Rj; |
|||
secp256k1_ge_t Ra; |
|||
secp256k1_fe_t Rx; |
|||
secp256k1_scalar_t h, s; |
|||
unsigned char hh[32]; |
|||
int overflow; |
|||
|
|||
hash(hh, sig64, msg32); |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(&h, hh, &overflow); |
|||
if (overflow || secp256k1_scalar_is_zero(&h)) { |
|||
return 0; |
|||
} |
|||
overflow = 0; |
|||
secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); |
|||
if (overflow) { |
|||
return 0; |
|||
} |
|||
if (!secp256k1_fe_set_b32(&Rx, sig64)) { |
|||
return 0; |
|||
} |
|||
if (!secp256k1_ge_set_xo_var(&Ra, &Rx, 0)) { |
|||
return 0; |
|||
} |
|||
secp256k1_gej_set_ge(&Rj, &Ra); |
|||
secp256k1_scalar_inverse_var(&h, &h); |
|||
secp256k1_scalar_negate(&s, &s); |
|||
secp256k1_scalar_mul(&s, &s, &h); |
|||
secp256k1_ecmult(ctx, &Qj, &Rj, &h, &s); |
|||
if (secp256k1_gej_is_infinity(&Qj)) { |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej(pubkey, &Qj); |
|||
return 1; |
|||
} |
|||
|
|||
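/* Combines n partial signatures that share the same r (their first 32 bytes) by summing the
 * s components modulo the group order: returns 1 on success, 0 if the summed s is zero, and
 * -1 if any s overflows or the r values differ. */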
static int secp256k1_schnorr_sig_combine(unsigned char *sig64, int n, const unsigned char * const *sig64ins) { |
|||
secp256k1_scalar_t s = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); |
|||
int i; |
|||
for (i = 0; i < n; i++) { |
|||
secp256k1_scalar_t si; |
|||
int overflow; |
|||
secp256k1_scalar_set_b32(&si, sig64ins[i] + 32, &overflow); |
|||
if (overflow) { |
|||
return -1; |
|||
} |
|||
if (i) { |
|||
if (memcmp(sig64ins[i - 1], sig64ins[i], 32) != 0) { |
|||
return -1; |
|||
} |
|||
} |
|||
secp256k1_scalar_add(&s, &s, &si); |
|||
} |
|||
if (secp256k1_scalar_is_zero(&s)) { |
|||
return 0; |
|||
} |
|||
memcpy(sig64, sig64ins[0], 32); |
|||
secp256k1_scalar_get_b32(sig64 + 32, &s); |
|||
secp256k1_scalar_clear(&s); |
|||
return 1; |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,173 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014-2015 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef SECP256K1_MODULE_SCHNORR_TESTS |
|||
#define SECP256K1_MODULE_SCHNORR_TESTS |
|||
|
|||
void test_schnorr_end_to_end(void) { |
|||
unsigned char privkey[32]; |
|||
unsigned char message[32]; |
|||
unsigned char schnorr_signature[64]; |
|||
secp256k1_pubkey_t pubkey, recpubkey; |
|||
|
|||
/* Generate a random key and message. */ |
|||
{ |
|||
secp256k1_scalar_t key; |
|||
random_scalar_order_test(&key); |
|||
secp256k1_scalar_get_b32(privkey, &key); |
|||
secp256k1_rand256_test(message); |
|||
} |
|||
|
|||
/* Construct and verify corresponding public key. */ |
|||
CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); |
|||
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); |
|||
|
|||
/* Schnorr sign. */ |
|||
CHECK(secp256k1_schnorr_sign(ctx, message, schnorr_signature, privkey, NULL, NULL) == 1); |
|||
CHECK(secp256k1_schnorr_verify(ctx, message, schnorr_signature, &pubkey) == 1); |
|||
CHECK(secp256k1_schnorr_recover(ctx, message, schnorr_signature, &recpubkey) == 1); |
|||
CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); |
|||
/* Destroy signature and verify again. */ |
|||
schnorr_signature[secp256k1_rand32() % 64] += 1 + (secp256k1_rand32() % 255); |
|||
CHECK(secp256k1_schnorr_verify(ctx, message, schnorr_signature, &pubkey) == 0); |
|||
CHECK(secp256k1_schnorr_recover(ctx, message, schnorr_signature, &recpubkey) != 1 || |
|||
memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); |
|||
} |
|||
|
|||
/** Horribly broken hash function. Do not use for anything but tests. */ |
|||
void test_schnorr_hash(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { |
|||
int i; |
|||
for (i = 0; i < 32; i++) { |
|||
h32[i] = r32[i] ^ msg32[i]; |
|||
} |
|||
} |
|||
|
|||
void test_schnorr_sign_verify(void) { |
|||
unsigned char msg32[32]; |
|||
unsigned char sig64[3][64]; |
|||
secp256k1_gej_t pubkeyj[3]; |
|||
secp256k1_ge_t pubkey[3]; |
|||
secp256k1_scalar_t nonce[3], key[3]; |
|||
int i = 0; |
|||
int k; |
|||
|
|||
secp256k1_rand256_test(msg32); |
|||
|
|||
for (k = 0; k < 3; k++) { |
|||
random_scalar_order_test(&key[k]); |
|||
|
|||
do { |
|||
random_scalar_order_test(&nonce[k]); |
|||
if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64[k], &key[k], &nonce[k], NULL, &test_schnorr_hash, msg32)) { |
|||
break; |
|||
} |
|||
} while(1); |
|||
|
|||
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubkeyj[k], &key[k]); |
|||
secp256k1_ge_set_gej_var(&pubkey[k], &pubkeyj[k]); |
|||
CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32)); |
|||
|
|||
for (i = 0; i < 4; i++) { |
|||
int pos = secp256k1_rand32() % 64; |
|||
int mod = 1 + (secp256k1_rand32() % 255); |
|||
sig64[k][pos] ^= mod; |
|||
CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32) == 0); |
|||
sig64[k][pos] ^= mod; |
|||
} |
|||
} |
|||
} |
|||
|
|||
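/* Walks through the multiparty flow: every signer generates a key and a nonce pair, combines
 * the other signers' public nonces, and produces a partial signature; the partial signatures
 * are then merged with secp256k1_schnorr_partial_combine and verified against the combined
 * public key. Randomly damaging a nonce, a key, a partial signature or the combined signature
 * must make some step of that pipeline fail. */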
void test_schnorr_threshold(void) { |
|||
unsigned char msg[32]; |
|||
unsigned char sec[5][32]; |
|||
secp256k1_pubkey_t pub[5]; |
|||
unsigned char nonce[5][32]; |
|||
secp256k1_pubkey_t pubnonce[5]; |
|||
unsigned char sig[5][64]; |
|||
const unsigned char* sigs[5]; |
|||
unsigned char allsig[64]; |
|||
const secp256k1_pubkey_t* pubs[5]; |
|||
secp256k1_pubkey_t allpub; |
|||
int n, i; |
|||
int damage; |
|||
int ret = 0; |
|||
|
|||
damage = (secp256k1_rand32() % 2) ? (1 + (secp256k1_rand32() % 4)) : 0; |
|||
secp256k1_rand256_test(msg); |
|||
n = 2 + (secp256k1_rand32() % 4); |
|||
for (i = 0; i < n; i++) { |
|||
do { |
|||
secp256k1_rand256_test(sec[i]); |
|||
} while (!secp256k1_ec_seckey_verify(ctx, sec[i])); |
|||
CHECK(secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i])); |
|||
CHECK(secp256k1_schnorr_generate_nonce_pair(ctx, msg, sec[i], NULL, NULL, &pubnonce[i], nonce[i])); |
|||
pubs[i] = &pub[i]; |
|||
} |
|||
if (damage == 1) { |
|||
nonce[secp256k1_rand32() % n][secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255); |
|||
} else if (damage == 2) { |
|||
sec[secp256k1_rand32() % n][secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255); |
|||
} |
|||
for (i = 0; i < n; i++) { |
|||
secp256k1_pubkey_t allpubnonce; |
|||
const secp256k1_pubkey_t *pubnonces[4]; |
|||
int j; |
|||
for (j = 0; j < i; j++) { |
|||
pubnonces[j] = &pubnonce[j]; |
|||
} |
|||
for (j = i + 1; j < n; j++) { |
|||
pubnonces[j - 1] = &pubnonce[j]; |
|||
} |
|||
CHECK(secp256k1_ec_pubkey_combine(ctx, &allpubnonce, n - 1, pubnonces)); |
|||
ret |= (secp256k1_schnorr_partial_sign(ctx, msg, sig[i], sec[i], nonce[i], &allpubnonce) != 1) * 1; |
|||
sigs[i] = sig[i]; |
|||
} |
|||
if (damage == 3) { |
|||
sig[secp256k1_rand32() % n][secp256k1_rand32() % 64] ^= 1 + (secp256k1_rand32() % 255); |
|||
} |
|||
ret |= (secp256k1_ec_pubkey_combine(ctx, &allpub, n, pubs) != 1) * 2; |
|||
if ((ret & 1) == 0) { |
|||
ret |= (secp256k1_schnorr_partial_combine(ctx, allsig, n, sigs) != 1) * 4; |
|||
} |
|||
if (damage == 4) { |
|||
allsig[secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255); |
|||
} |
|||
if ((ret & 7) == 0) { |
|||
ret |= (secp256k1_schnorr_verify(ctx, msg, allsig, &allpub) != 1) * 8; |
|||
} |
|||
CHECK((ret == 0) == (damage == 0)); |
|||
} |
|||
|
|||
void test_schnorr_recovery(void) { |
|||
unsigned char msg32[32]; |
|||
unsigned char sig64[64]; |
|||
secp256k1_ge_t Q; |
|||
|
|||
secp256k1_rand256_test(msg32); |
|||
secp256k1_rand256_test(sig64); |
|||
secp256k1_rand256_test(sig64 + 32); |
|||
if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1) { |
|||
CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1); |
|||
} |
|||
} |
|||
|
|||
void run_schnorr_tests(void) { |
|||
int i; |
|||
for (i = 0; i < 32*count; i++) { |
|||
test_schnorr_end_to_end(); |
|||
} |
|||
for (i = 0; i < 32 * count; i++) { |
|||
test_schnorr_sign_verify(); |
|||
} |
|||
for (i = 0; i < 16 * count; i++) { |
|||
test_schnorr_recovery(); |
|||
} |
|||
for (i = 0; i < 10 * count; i++) { |
|||
test_schnorr_threshold(); |
|||
} |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,68 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_NUM_ |
|||
#define _SECP256K1_NUM_ |
|||
|
|||
#ifndef USE_NUM_GMP |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#if defined(USE_NUM_GMP) |
|||
#include "num_gmp.h" |
|||
#else |
|||
#error "Please select num implementation" |
|||
#endif |
|||
|
|||
/** Copy a number. */ |
|||
static void secp256k1_num_copy(secp256k1_num_t *r, const secp256k1_num_t *a); |
|||
|
|||
/** Convert a number's absolute value to a binary big-endian string.
|
|||
* The output buffer must be large enough. */ |
|||
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num_t *a); |
|||
|
|||
/** Set a number to the value of a binary big-endian string. */ |
|||
static void secp256k1_num_set_bin(secp256k1_num_t *r, const unsigned char *a, unsigned int alen); |
|||
|
|||
/** Compute a modular inverse. The input must be less than the modulus. */ |
|||
static void secp256k1_num_mod_inverse(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *m); |
|||
|
|||
/** Compare the absolute value of two numbers. */ |
|||
static int secp256k1_num_cmp(const secp256k1_num_t *a, const secp256k1_num_t *b); |
|||
|
|||
/** Test whether two numbers are equal (including sign). */ |
|||
static int secp256k1_num_eq(const secp256k1_num_t *a, const secp256k1_num_t *b); |
|||
|
|||
/** Add two (signed) numbers. */ |
|||
static void secp256k1_num_add(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); |
|||
|
|||
/** Subtract two (signed) numbers. */ |
|||
static void secp256k1_num_sub(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); |
|||
|
|||
/** Multiply two (signed) numbers. */ |
|||
static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); |
|||
|
|||
/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
|
|||
even if r was negative. */ |
|||
static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m); |
|||
|
|||
/** Right-shift the passed number by bits bits. */ |
|||
static void secp256k1_num_shift(secp256k1_num_t *r, int bits); |
|||
|
|||
/** Check whether a number is zero. */ |
|||
static int secp256k1_num_is_zero(const secp256k1_num_t *a); |
|||
|
|||
/** Check whether a number is strictly negative. */ |
|||
static int secp256k1_num_is_neg(const secp256k1_num_t *a); |
|||
|
|||
/** Change a number's sign. */ |
|||
static void secp256k1_num_negate(secp256k1_num_t *r); |
|||
|
|||
#endif |
|||
|
|||
#endif |
@ -0,0 +1,20 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_NUM_REPR_ |
|||
#define _SECP256K1_NUM_REPR_ |
|||
|
|||
#include "/usr/include/gmp.h" |
|||
|
|||
#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS) |
|||
|
|||
typedef struct { |
|||
mp_limb_t data[2*NUM_LIMBS]; |
|||
int neg; |
|||
int limbs; |
|||
} secp256k1_num_t; |
|||
|
|||
#endif |
@ -0,0 +1,260 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_NUM_REPR_IMPL_H_ |
|||
#define _SECP256K1_NUM_REPR_IMPL_H_ |
|||
|
|||
#include <string.h> |
|||
#include <stdlib.h> |
|||
#include "/usr/include/gmp.h" |
|||
|
|||
#include "util.h" |
|||
#include "num.h" |
|||
|
|||
#ifdef VERIFY |
|||
static void secp256k1_num_sanity(const secp256k1_num_t *a) { |
|||
VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0)); |
|||
} |
|||
#else |
|||
#define secp256k1_num_sanity(a) do { } while(0) |
|||
#endif |
|||
|
|||
static void secp256k1_num_copy(secp256k1_num_t *r, const secp256k1_num_t *a) { |
|||
*r = *a; |
|||
} |
|||
|
|||
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num_t *a) { |
|||
unsigned char tmp[65]; |
|||
int len = 0; |
|||
int shift = 0; |
|||
if (a->limbs>1 || a->data[0] != 0) { |
|||
len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs); |
|||
} |
|||
while (shift < len && tmp[shift] == 0) shift++; |
|||
VERIFY_CHECK(len-shift <= (int)rlen); |
|||
memset(r, 0, rlen - len + shift); |
|||
if (len > shift) { |
|||
memcpy(r + rlen - len + shift, tmp + shift, len - shift); |
|||
} |
|||
memset(tmp, 0, sizeof(tmp)); |
|||
} |
|||
|
|||
static void secp256k1_num_set_bin(secp256k1_num_t *r, const unsigned char *a, unsigned int alen) { |
|||
int len; |
|||
VERIFY_CHECK(alen > 0); |
|||
VERIFY_CHECK(alen <= 64); |
|||
len = mpn_set_str(r->data, a, alen, 256); |
|||
if (len == 0) { |
|||
r->data[0] = 0; |
|||
len = 1; |
|||
} |
|||
VERIFY_CHECK(len <= NUM_LIMBS*2); |
|||
r->limbs = len; |
|||
r->neg = 0; |
|||
while (r->limbs > 1 && r->data[r->limbs-1]==0) { |
|||
r->limbs--; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_add_abs(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); |
|||
r->limbs = a->limbs; |
|||
if (c != 0) { |
|||
VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); |
|||
r->data[r->limbs++] = c; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_sub_abs(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs); |
|||
VERIFY_CHECK(c == 0); |
|||
r->limbs = a->limbs; |
|||
while (r->limbs > 1 && r->data[r->limbs-1]==0) { |
|||
r->limbs--; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m) { |
|||
secp256k1_num_sanity(r); |
|||
secp256k1_num_sanity(m); |
|||
|
|||
if (r->limbs >= m->limbs) { |
|||
mp_limb_t t[2*NUM_LIMBS]; |
|||
mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs); |
|||
memset(t, 0, sizeof(t)); |
|||
r->limbs = m->limbs; |
|||
while (r->limbs > 1 && r->data[r->limbs-1]==0) { |
|||
r->limbs--; |
|||
} |
|||
} |
|||
|
|||
if (r->neg && (r->limbs > 1 || r->data[0] != 0)) { |
|||
secp256k1_num_sub_abs(r, m, r); |
|||
r->neg = 0; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_mod_inverse(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *m) { |
|||
int i; |
|||
mp_limb_t g[NUM_LIMBS+1]; |
|||
mp_limb_t u[NUM_LIMBS+1]; |
|||
mp_limb_t v[NUM_LIMBS+1]; |
|||
mp_size_t sn; |
|||
mp_size_t gn; |
|||
secp256k1_num_sanity(a); |
|||
secp256k1_num_sanity(m); |
|||
|
|||
/** mpn_gcdext computes: (G,S) = gcdext(U,V), where
|
|||
* * G = gcd(U,V) |
|||
* * G = U*S + V*T |
|||
* * U has at least as many limbs as V, and V has no padding |
|||
* If we set U to be (a padded version of) a, and V = m: |
|||
* G = a*S + m*T |
|||
* G = a*S mod m |
|||
* Assuming G=1: |
|||
* S = 1/a mod m |
|||
*/ |
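/* For illustration with small numbers: a = 7, m = 15 gives G = 1 and S = -2, since
 * 7*(-2) + 15*1 = 1; reducing, -2 mod 15 = 13 and 7*13 = 91 = 6*15 + 1, so 13 = 1/7 mod 15.
 * The sn < 0 branch below handles exactly this negative-S case. */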
|||
VERIFY_CHECK(m->limbs <= NUM_LIMBS); |
|||
VERIFY_CHECK(m->data[m->limbs-1] != 0); |
|||
for (i = 0; i < m->limbs; i++) { |
|||
u[i] = (i < a->limbs) ? a->data[i] : 0; |
|||
v[i] = m->data[i]; |
|||
} |
|||
sn = NUM_LIMBS+1; |
|||
gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs); |
|||
VERIFY_CHECK(gn == 1); |
|||
VERIFY_CHECK(g[0] == 1); |
|||
r->neg = a->neg ^ m->neg; |
|||
if (sn < 0) { |
|||
mpn_sub(r->data, m->data, m->limbs, r->data, -sn); |
|||
r->limbs = m->limbs; |
|||
while (r->limbs > 1 && r->data[r->limbs-1]==0) { |
|||
r->limbs--; |
|||
} |
|||
} else { |
|||
r->limbs = sn; |
|||
} |
|||
memset(g, 0, sizeof(g)); |
|||
memset(u, 0, sizeof(u)); |
|||
memset(v, 0, sizeof(v)); |
|||
} |
|||
|
|||
static int secp256k1_num_is_zero(const secp256k1_num_t *a) { |
|||
return (a->limbs == 1 && a->data[0] == 0); |
|||
} |
|||
|
|||
static int secp256k1_num_is_neg(const secp256k1_num_t *a) { |
|||
return (a->limbs > 1 || a->data[0] != 0) && a->neg; |
|||
} |
|||
|
|||
static int secp256k1_num_cmp(const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
if (a->limbs > b->limbs) { |
|||
return 1; |
|||
} |
|||
if (a->limbs < b->limbs) { |
|||
return -1; |
|||
} |
|||
return mpn_cmp(a->data, b->data, a->limbs); |
|||
} |
|||
|
|||
static int secp256k1_num_eq(const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
if (a->limbs > b->limbs) { |
|||
return 0; |
|||
} |
|||
if (a->limbs < b->limbs) { |
|||
return 0; |
|||
} |
|||
if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) { |
|||
return 0; |
|||
} |
|||
return mpn_cmp(a->data, b->data, a->limbs) == 0; |
|||
} |
|||
|
|||
static void secp256k1_num_subadd(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b, int bneg) { |
|||
if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ |
|||
r->neg = a->neg; |
|||
if (a->limbs >= b->limbs) { |
|||
secp256k1_num_add_abs(r, a, b); |
|||
} else { |
|||
secp256k1_num_add_abs(r, b, a); |
|||
} |
|||
} else { |
|||
if (secp256k1_num_cmp(a, b) > 0) { |
|||
r->neg = a->neg; |
|||
secp256k1_num_sub_abs(r, a, b); |
|||
} else { |
|||
r->neg = b->neg ^ bneg; |
|||
secp256k1_num_sub_abs(r, b, a); |
|||
} |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_add(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
secp256k1_num_sanity(a); |
|||
secp256k1_num_sanity(b); |
|||
secp256k1_num_subadd(r, a, b, 0); |
|||
} |
|||
|
|||
static void secp256k1_num_sub(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
secp256k1_num_sanity(a); |
|||
secp256k1_num_sanity(b); |
|||
secp256k1_num_subadd(r, a, b, 1); |
|||
} |
|||
|
|||
static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { |
|||
mp_limb_t tmp[2*NUM_LIMBS+1]; |
|||
secp256k1_num_sanity(a); |
|||
secp256k1_num_sanity(b); |
|||
|
|||
VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1); |
|||
if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) { |
|||
r->limbs = 1; |
|||
r->neg = 0; |
|||
r->data[0] = 0; |
|||
return; |
|||
} |
|||
if (a->limbs >= b->limbs) { |
|||
mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs); |
|||
} else { |
|||
mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs); |
|||
} |
|||
r->limbs = a->limbs + b->limbs; |
|||
if (r->limbs > 1 && tmp[r->limbs - 1]==0) { |
|||
r->limbs--; |
|||
} |
|||
VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS); |
|||
mpn_copyi(r->data, tmp, r->limbs); |
|||
r->neg = a->neg ^ b->neg; |
|||
memset(tmp, 0, sizeof(tmp)); |
|||
} |
|||
|
|||
static void secp256k1_num_shift(secp256k1_num_t *r, int bits) { |
|||
int i; |
|||
if (bits % GMP_NUMB_BITS) { |
|||
/* Shift within limbs. */ |
|||
mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); |
|||
} |
|||
if (bits >= GMP_NUMB_BITS) { |
|||
/* Shift full limbs. */ |
|||
for (i = 0; i < r->limbs; i++) { |
|||
int index = i + (bits / GMP_NUMB_BITS); |
|||
if (index < r->limbs && index < 2*NUM_LIMBS) { |
|||
r->data[i] = r->data[index]; |
|||
} else { |
|||
r->data[i] = 0; |
|||
} |
|||
} |
|||
} |
|||
while (r->limbs>1 && r->data[r->limbs-1]==0) { |
|||
r->limbs--; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_num_negate(secp256k1_num_t *r) { |
|||
r->neg ^= 1; |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,24 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_NUM_IMPL_H_ |
|||
#define _SECP256K1_NUM_IMPL_H_ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#include "num.h" |
|||
|
|||
#if defined(USE_NUM_GMP) |
|||
#include "num_gmp_impl.h" |
|||
#elif defined(USE_NUM_NONE) |
|||
/* Nothing. */ |
|||
#else |
|||
#error "Please select num implementation" |
|||
#endif |
|||
|
|||
#endif |
@ -0,0 +1,23 @@ |
|||
#include "org_bitcoin_NativeSecp256k1.h" |
|||
#include "include/secp256k1.h" |
|||
|
|||
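/* The direct ByteBuffer handed over from the Java side is laid out, as read below, as
 * [32-byte message][32-bit sigLen][32-bit pubLen][sigLen signature bytes][pubLen pubkey bytes],
 * with both lengths read in the host's native byte order at offsets 32 and 36. */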
JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify |
|||
(JNIEnv* env, jclass classObject, jobject byteBufferObject) |
|||
{ |
|||
unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); |
|||
int sigLen = *((int*)(data + 32)); |
|||
int pubLen = *((int*)(data + 32 + 4)); |
|||
|
|||
return secp256k1_ecdsa_verify(data, 32, data+32+8, sigLen, data+32+8+sigLen, pubLen); |
|||
} |
|||
|
|||
static void __javasecp256k1_attach(void) __attribute__((constructor)); |
|||
static void __javasecp256k1_detach(void) __attribute__((destructor)); |
|||
|
|||
static void __javasecp256k1_attach(void) { |
|||
secp256k1_start(SECP256K1_START_VERIFY); |
|||
} |
|||
|
|||
static void __javasecp256k1_detach(void) { |
|||
secp256k1_stop(); |
|||
} |
@ -0,0 +1,21 @@ |
|||
/* DO NOT EDIT THIS FILE - it is machine generated */ |
|||
#include <jni.h> |
|||
/* Header for class org_bitcoin_NativeSecp256k1 */ |
|||
|
|||
#ifndef _Included_org_bitcoin_NativeSecp256k1 |
|||
#define _Included_org_bitcoin_NativeSecp256k1 |
|||
#ifdef __cplusplus |
|||
extern "C" { |
|||
#endif |
|||
/*
|
|||
* Class: org_bitcoin_NativeSecp256k1 |
|||
* Method: secp256k1_ecdsa_verify |
|||
* Signature: (Ljava/nio/ByteBuffer;)I |
|||
*/ |
|||
JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify |
|||
(JNIEnv *, jclass, jobject); |
|||
|
|||
#ifdef __cplusplus |
|||
} |
|||
#endif |
|||
#endif |
@ -0,0 +1,105 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_ |
|||
#define _SECP256K1_SCALAR_ |
|||
|
|||
#define USE_SCALAR_8X32 1 |
|||
#include "num.h" |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#if defined(USE_SCALAR_4X64) |
|||
#include "scalar_4x64.h" |
|||
#elif defined(USE_SCALAR_8X32) |
|||
#include "scalar_8x32.h" |
|||
#else |
|||
#error "Please select scalar implementation" |
|||
#endif |
|||
|
|||
/** Clear a scalar to prevent the leak of sensitive data. */ |
|||
static void secp256k1_scalar_clear(secp256k1_scalar_t *r); |
|||
|
|||
/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ |
|||
static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count); |
|||
|
|||
/** Access bits from a scalar. Not constant time. */ |
|||
static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count); |
|||
|
|||
/** Set a scalar from a big endian byte array. */ |
|||
static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *bin, int *overflow); |
|||
|
|||
/** Set a scalar to an unsigned integer. */ |
|||
static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v); |
|||
|
|||
/** Convert a scalar to a byte array. */ |
|||
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a); |
|||
|
|||
/** Add two scalars together (modulo the group order). Returns whether it overflowed. */ |
|||
static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); |
|||
|
|||
/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ |
|||
static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag); |
|||
|
|||
/** Multiply two scalars (modulo the group order). */ |
|||
static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); |
|||
|
|||
/** Shift a scalar right by some amount strictly between 0 and 16, returning
|
|||
* the low bits that were shifted off */ |
|||
static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n); |
|||
|
|||
/** Compute the square of a scalar (modulo the group order). */ |
|||
static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
/** Compute the inverse of a scalar (modulo the group order). */ |
|||
static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ |
|||
static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
/** Compute the complement of a scalar (modulo the group order). */ |
|||
static void secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
/** Check whether a scalar equals zero. */ |
|||
static int secp256k1_scalar_is_zero(const secp256k1_scalar_t *a); |
|||
|
|||
/** Check whether a scalar equals one. */ |
|||
static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a); |
|||
|
|||
/** Check whether a scalar, considered as a nonnegative integer, is even. */ |
|||
static int secp256k1_scalar_is_even(const secp256k1_scalar_t *a); |
|||
|
|||
/** Check whether a scalar is higher than the group order divided by 2. */ |
|||
static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a); |
|||
|
|||
/** Conditionally negate a number, in constant time.
|
|||
* Returns -1 if the number was negated, 1 otherwise */ |
|||
static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *a, int flag); |
|||
|
|||
#ifndef USE_NUM_NONE |
|||
/** Convert a scalar to a number. */ |
|||
static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a); |
|||
|
|||
/** Get the order of the group as a number. */ |
|||
static void secp256k1_scalar_order_get_num(secp256k1_num_t *r); |
|||
#endif |
|||
|
|||
/** Compare two scalars. */ |
|||
static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
/** Find r1 and r2 such that r1+r2*2^128 = a. */ |
|||
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a); |
|||
/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */ |
|||
static void secp256k1_scalar_split_lambda(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a); |
|||
#endif |
|||
|
|||
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ |
|||
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift); |
|||
|
|||
#endif |
@ -0,0 +1,19 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_REPR_ |
|||
#define _SECP256K1_SCALAR_REPR_ |
|||
|
|||
#include <stdint.h> |
|||
|
|||
/** A scalar modulo the group order of the secp256k1 curve. */ |
|||
typedef struct { |
|||
uint64_t d[4]; |
|||
} secp256k1_scalar_t; |
|||
|
|||
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} |
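/* The limbs are stored least significant first, so for example
 *   SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 2)
 * expands to {{2, 0, 0, 0}}, i.e. d[0] = 2 holds the low 64 bits of the scalar. */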
|||
|
|||
#endif |
@ -0,0 +1,947 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ |
|||
#define _SECP256K1_SCALAR_REPR_IMPL_H_ |
|||
|
|||
/* Limbs of the secp256k1 order. */ |
|||
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) |
|||
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) |
|||
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL) |
|||
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) |
|||
|
|||
/* Limbs of 2^256 minus the secp256k1 order. */ |
|||
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) |
|||
#define SECP256K1_N_C_1 (~SECP256K1_N_1) |
|||
#define SECP256K1_N_C_2 (1) |
|||
|
|||
/* Limbs of half the secp256k1 order. */ |
|||
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL) |
|||
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL) |
|||
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) |
|||
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) { |
|||
r->d[0] = 0; |
|||
r->d[1] = 0; |
|||
r->d[2] = 0; |
|||
r->d[3] = 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) { |
|||
r->d[0] = v; |
|||
r->d[1] = 0; |
|||
r->d[2] = 0; |
|||
r->d[3] = 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { |
|||
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); |
|||
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); |
|||
} |
|||
|
|||
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { |
|||
VERIFY_CHECK(count < 32); |
|||
VERIFY_CHECK(offset + count <= 256); |
|||
if ((offset + count - 1) >> 6 == offset >> 6) { |
|||
return secp256k1_scalar_get_bits(a, offset, count); |
|||
} else { |
|||
VERIFY_CHECK((offset >> 6) + 1 < 4); |
|||
return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); |
|||
} |
|||
} |
|||
|
|||
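/* Returns 1 exactly when the scalar is >= the group order N, comparing limbs from the most
 * significant downwards with bitwise flags rather than branches on the (possibly secret) value. */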
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { |
|||
int yes = 0; |
|||
int no = 0; |
|||
no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */ |
|||
no |= (a->d[2] < SECP256K1_N_2); |
|||
yes |= (a->d[2] > SECP256K1_N_2) & ~no; |
|||
no |= (a->d[1] < SECP256K1_N_1); |
|||
yes |= (a->d[1] > SECP256K1_N_1) & ~no; |
|||
yes |= (a->d[0] >= SECP256K1_N_0) & ~no; |
|||
return yes; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsigned int overflow) { |
|||
uint128_t t; |
|||
VERIFY_CHECK(overflow <= 1); |
|||
t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; |
|||
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1; |
|||
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2; |
|||
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint64_t)r->d[3]; |
|||
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; |
|||
return overflow; |
|||
} |
|||
|
|||
static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
int overflow; |
|||
uint128_t t = (uint128_t)a->d[0] + b->d[0]; |
|||
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)a->d[1] + b->d[1]; |
|||
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)a->d[2] + b->d[2]; |
|||
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)a->d[3] + b->d[3]; |
|||
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
overflow = t + secp256k1_scalar_check_overflow(r); |
|||
VERIFY_CHECK(overflow == 0 || overflow == 1); |
|||
secp256k1_scalar_reduce(r, overflow); |
|||
return overflow; |
|||
} |
|||
|
|||
static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag) { |
|||
uint128_t t; |
|||
VERIFY_CHECK(bit < 256); |
|||
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ |
|||
t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); |
|||
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); |
|||
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); |
|||
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; |
|||
t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); |
|||
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK((t >> 64) == 0); |
|||
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) { |
|||
int over; |
|||
r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; |
|||
r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; |
|||
r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; |
|||
r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; |
|||
over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); |
|||
if (overflow) { |
|||
*overflow = over; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) { |
|||
bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; |
|||
bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; |
|||
bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; |
|||
bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar_t *a) { |
|||
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; |
|||
} |
|||
|
|||
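/* Computes the additive inverse modulo the group order N: over the four limbs, ~a equals
 * 2^256 - 1 - a, so adding N + 1 and dropping the carry out of bit 256 leaves N - a; the
 * nonzero mask then forces the result to zero when a itself is zero. */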
static void secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { |
|||
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0); |
|||
uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; |
|||
r->d[0] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; |
|||
r->d[1] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(~a->d[2]) + SECP256K1_N_2; |
|||
r->d[2] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(~a->d[3]) + SECP256K1_N_3; |
|||
r->d[3] = t & nonzero; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a) { |
|||
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; |
|||
} |
|||
|
|||
static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { |
|||
int yes = 0; |
|||
int no = 0; |
|||
no |= (a->d[3] < SECP256K1_N_H_3); |
|||
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; |
|||
no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */ |
|||
no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; |
|||
yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; |
|||
yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; |
|||
return yes; |
|||
} |
|||
|
|||
static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *r, int flag) { |
|||
/* If flag = 0, mask = 00...00 and this is a no-op;
|
|||
* if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ |
|||
uint64_t mask = !flag - 1; |
|||
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; |
|||
uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); |
|||
r->d[0] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); |
|||
r->d[1] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); |
|||
r->d[2] = t & nonzero; t >>= 64; |
|||
t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); |
|||
r->d[3] = t & nonzero; |
|||
return 2 * (mask == 0) - 1; |
|||
} |
|||
|
|||
/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ |
|||
|
|||
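/* The macros below maintain a small accumulator (c0,c1,c2): c0 is the 64-bit column currently
 * being summed and c1,c2 hold the carries into the next columns; muladd/sumadd feed terms in,
 * and extract() emits the finished column and shifts the carries down. */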
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define muladd(a,b) { \ |
|||
uint64_t tl, th; \ |
|||
{ \ |
|||
uint128_t t = (uint128_t)a * b; \ |
|||
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
c0 += tl; /* overflow is handled on the next line */ \ |
|||
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ |
|||
c1 += th; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ |
|||
} |
|||
|
|||
/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ |
|||
#define muladd_fast(a,b) { \ |
|||
uint64_t tl, th; \ |
|||
{ \ |
|||
uint128_t t = (uint128_t)a * b; \ |
|||
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
c0 += tl; /* overflow is handled on the next line */ \ |
|||
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ |
|||
c1 += th; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK(c1 >= th); \ |
|||
} |
|||
|
|||
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define muladd2(a,b) { \ |
|||
uint64_t tl, th, th2, tl2; \ |
|||
{ \ |
|||
uint128_t t = (uint128_t)a * b; \ |
|||
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \ |
|||
c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ |
|||
tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \ |
|||
th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ |
|||
c0 += tl2; /* overflow is handled on the next line */ \ |
|||
th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \ |
|||
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ |
|||
c1 += th2; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ |
|||
} |
|||
|
|||
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define sumadd(a) { \ |
|||
unsigned int over; \ |
|||
c0 += (a); /* overflow is handled on the next line */ \ |
|||
over = (c0 < (a)) ? 1 : 0; \ |
|||
c1 += over; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \ |
|||
} |
|||
|
|||
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */ |
|||
#define sumadd_fast(a) { \ |
|||
c0 += (a); /* overflow is handled on the next line */ \ |
|||
c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ |
|||
VERIFY_CHECK(c2 == 0); \ |
|||
} |
|||
|
|||
/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */ |
|||
#define extract(n) { \ |
|||
(n) = c0; \ |
|||
c0 = c1; \ |
|||
c1 = c2; \ |
|||
c2 = 0; \ |
|||
} |
|||
|
|||
/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */ |
|||
#define extract_fast(n) { \ |
|||
(n) = c0; \ |
|||
c0 = c1; \ |
|||
c1 = 0; \ |
|||
VERIFY_CHECK(c2 == 0); \ |
|||
} |
|||
|
|||
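/* Since SECP256K1_N_C_* are the limbs of 2^256 - N, we have 2^256 = N_C (mod N), so a 512-bit
 * value l = hi*2^256 + lo satisfies l = lo + hi*N_C (mod N). Each pass below folds the limbs
 * above bit 256 back in by multiplying them with N_C (512 -> 385 -> 258 -> 256 bits), and the
 * final secp256k1_scalar_reduce call handles the last conditional subtraction of N. */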
static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l) { |
|||
#ifdef USE_ASM_X86_64 |
|||
/* Reduce 512 bits into 385. */ |
|||
uint64_t m0, m1, m2, m3, m4, m5, m6; |
|||
uint64_t p0, p1, p2, p3, p4; |
|||
uint64_t c; |
|||
|
|||
__asm__ __volatile__( |
|||
/* Preload. */ |
|||
"movq 32(%%rsi), %%r11\n" |
|||
"movq 40(%%rsi), %%r12\n" |
|||
"movq 48(%%rsi), %%r13\n" |
|||
"movq 56(%%rsi), %%r14\n" |
|||
/* Initialize r8,r9,r10 */ |
|||
"movq 0(%%rsi), %%r8\n" |
|||
"movq $0, %%r9\n" |
|||
"movq $0, %%r10\n" |
|||
/* (r8,r9) += n0 * c0 */ |
|||
"movq %8, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
/* extract m0 */ |
|||
"movq %%r8, %q0\n" |
|||
"movq $0, %%r8\n" |
|||
/* (r9,r10) += l1 */ |
|||
"addq 8(%%rsi), %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r9,r10,r8) += n1 * c0 */ |
|||
"movq %8, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += n0 * c1 */ |
|||
"movq %9, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* extract m1 */ |
|||
"movq %%r9, %q1\n" |
|||
"movq $0, %%r9\n" |
|||
/* (r10,r8,r9) += l2 */ |
|||
"addq 16(%%rsi), %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += n2 * c0 */ |
|||
"movq %8, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += n1 * c1 */ |
|||
"movq %9, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += n0 */ |
|||
"addq %%r11, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* extract m2 */ |
|||
"movq %%r10, %q2\n" |
|||
"movq $0, %%r10\n" |
|||
/* (r8,r9,r10) += l3 */ |
|||
"addq 24(%%rsi), %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += n3 * c0 */ |
|||
"movq %8, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += n2 * c1 */ |
|||
"movq %9, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += n1 */ |
|||
"addq %%r12, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* extract m3 */ |
|||
"movq %%r8, %q3\n" |
|||
"movq $0, %%r8\n" |
|||
/* (r9,r10,r8) += n3 * c1 */ |
|||
"movq %9, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += n2 */ |
|||
"addq %%r13, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* extract m4 */ |
|||
"movq %%r9, %q4\n" |
|||
/* (r10,r8) += n3 */ |
|||
"addq %%r14, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* extract m5 */ |
|||
"movq %%r10, %q5\n" |
|||
/* extract m6 */ |
|||
"movq %%r8, %q6\n" |
|||
: "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6) |
|||
: "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) |
|||
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc"); |
|||
|
|||
/* Reduce 385 bits into 258. */ |
|||
__asm__ __volatile__( |
|||
/* Preload */ |
|||
"movq %q9, %%r11\n" |
|||
"movq %q10, %%r12\n" |
|||
"movq %q11, %%r13\n" |
|||
/* Initialize (r8,r9,r10) */ |
|||
"movq %q5, %%r8\n" |
|||
"movq $0, %%r9\n" |
|||
"movq $0, %%r10\n" |
|||
/* (r8,r9) += m4 * c0 */ |
|||
"movq %12, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
/* extract p0 */ |
|||
"movq %%r8, %q0\n" |
|||
"movq $0, %%r8\n" |
|||
/* (r9,r10) += m1 */ |
|||
"addq %q6, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r9,r10,r8) += m5 * c0 */ |
|||
"movq %12, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += m4 * c1 */ |
|||
"movq %13, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* extract p1 */ |
|||
"movq %%r9, %q1\n" |
|||
"movq $0, %%r9\n" |
|||
/* (r10,r8,r9) += m2 */ |
|||
"addq %q7, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += m6 * c0 */ |
|||
"movq %12, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += m5 * c1 */ |
|||
"movq %13, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += m4 */ |
|||
"addq %%r11, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* extract p2 */ |
|||
"movq %%r10, %q2\n" |
|||
/* (r8,r9) += m3 */ |
|||
"addq %q8, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r8,r9) += m6 * c1 */ |
|||
"movq %13, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
/* (r8,r9) += m5 */ |
|||
"addq %%r12, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* extract p3 */ |
|||
"movq %%r8, %q3\n" |
|||
/* (r9) += m6 */ |
|||
"addq %%r13, %%r9\n" |
|||
/* extract p4 */ |
|||
"movq %%r9, %q4\n" |
|||
: "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4) |
|||
: "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) |
|||
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc"); |
|||
|
|||
/* Reduce 258 bits into 256. */ |
|||
__asm__ __volatile__( |
|||
/* Preload */ |
|||
"movq %q5, %%r10\n" |
|||
/* (rax,rdx) = p4 * c0 */ |
|||
"movq %7, %%rax\n" |
|||
"mulq %%r10\n" |
|||
/* (rax,rdx) += p0 */ |
|||
"addq %q1, %%rax\n" |
|||
"adcq $0, %%rdx\n" |
|||
/* extract r0 */ |
|||
"movq %%rax, 0(%q6)\n" |
|||
/* Move to (r8,r9) */ |
|||
"movq %%rdx, %%r8\n" |
|||
"movq $0, %%r9\n" |
|||
/* (r8,r9) += p1 */ |
|||
"addq %q2, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r8,r9) += p4 * c1 */ |
|||
"movq %8, %%rax\n" |
|||
"mulq %%r10\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
/* Extract r1 */ |
|||
"movq %%r8, 8(%q6)\n" |
|||
"movq $0, %%r8\n" |
|||
/* (r9,r8) += p4 */ |
|||
"addq %%r10, %%r9\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r8) += p2 */ |
|||
"addq %q3, %%r9\n" |
|||
"adcq $0, %%r8\n" |
|||
/* Extract r2 */ |
|||
"movq %%r9, 16(%q6)\n" |
|||
"movq $0, %%r9\n" |
|||
/* (r8,r9) += p3 */ |
|||
"addq %q4, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* Extract r3 */ |
|||
"movq %%r8, 24(%q6)\n" |
|||
/* Extract c */ |
|||
"movq %%r9, %q0\n" |
|||
: "=g"(c) |
|||
: "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) |
|||
: "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); |
|||
#else |
|||
uint128_t c; |
|||
uint64_t c0, c1, c2; |
|||
uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; |
|||
uint64_t m0, m1, m2, m3, m4, m5; |
|||
uint32_t m6; |
|||
uint64_t p0, p1, p2, p3; |
|||
uint32_t p4; |
|||
|
|||
/* Reduce 512 bits into 385. */ |
|||
/* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */ |
|||
c0 = l[0]; c1 = 0; c2 = 0; |
|||
muladd_fast(n0, SECP256K1_N_C_0); |
|||
extract_fast(m0); |
|||
sumadd_fast(l[1]); |
|||
muladd(n1, SECP256K1_N_C_0); |
|||
muladd(n0, SECP256K1_N_C_1); |
|||
extract(m1); |
|||
sumadd(l[2]); |
|||
muladd(n2, SECP256K1_N_C_0); |
|||
muladd(n1, SECP256K1_N_C_1); |
|||
sumadd(n0); |
|||
extract(m2); |
|||
sumadd(l[3]); |
|||
muladd(n3, SECP256K1_N_C_0); |
|||
muladd(n2, SECP256K1_N_C_1); |
|||
sumadd(n1); |
|||
extract(m3); |
|||
muladd(n3, SECP256K1_N_C_1); |
|||
sumadd(n2); |
|||
extract(m4); |
|||
sumadd_fast(n3); |
|||
extract_fast(m5); |
|||
VERIFY_CHECK(c0 <= 1); |
|||
m6 = c0; |
|||
|
|||
/* Reduce 385 bits into 258. */ |
|||
/* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */ |
|||
c0 = m0; c1 = 0; c2 = 0; |
|||
muladd_fast(m4, SECP256K1_N_C_0); |
|||
extract_fast(p0); |
|||
sumadd_fast(m1); |
|||
muladd(m5, SECP256K1_N_C_0); |
|||
muladd(m4, SECP256K1_N_C_1); |
|||
extract(p1); |
|||
sumadd(m2); |
|||
muladd(m6, SECP256K1_N_C_0); |
|||
muladd(m5, SECP256K1_N_C_1); |
|||
sumadd(m4); |
|||
extract(p2); |
|||
sumadd_fast(m3); |
|||
muladd_fast(m6, SECP256K1_N_C_1); |
|||
sumadd_fast(m5); |
|||
extract_fast(p3); |
|||
p4 = c0 + m6; |
|||
VERIFY_CHECK(p4 <= 2); |
|||
|
|||
/* Reduce 258 bits into 256. */ |
|||
/* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */ |
|||
c = p0 + (uint128_t)SECP256K1_N_C_0 * p4; |
|||
r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; |
|||
c += p1 + (uint128_t)SECP256K1_N_C_1 * p4; |
|||
r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; |
|||
c += p2 + (uint128_t)p4; |
|||
r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; |
|||
c += p3; |
|||
r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; |
|||
#endif |
|||
|
|||
/* Final reduction of r. */ |
|||
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); |
|||
} |
|||
|
|||
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
#ifdef USE_ASM_X86_64 |
|||
const uint64_t *pb = b->d; |
|||
__asm__ __volatile__( |
|||
/* Preload */ |
|||
"movq 0(%%rdi), %%r15\n" |
|||
"movq 8(%%rdi), %%rbx\n" |
|||
"movq 16(%%rdi), %%rcx\n" |
|||
"movq 0(%%rdx), %%r11\n" |
|||
"movq 8(%%rdx), %%r12\n" |
|||
"movq 16(%%rdx), %%r13\n" |
|||
"movq 24(%%rdx), %%r14\n" |
|||
/* (rax,rdx) = a0 * b0 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r11\n" |
|||
/* Extract l0 */ |
|||
"movq %%rax, 0(%%rsi)\n" |
|||
/* (r8,r9,r10) = (rdx) */ |
|||
"movq %%rdx, %%r8\n" |
|||
"xorq %%r9, %%r9\n" |
|||
"xorq %%r10, %%r10\n" |
|||
/* (r8,r9,r10) += a0 * b1 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += a1 * b0 */ |
|||
"movq %%rbx, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* Extract l1 */ |
|||
"movq %%r8, 8(%%rsi)\n" |
|||
"xorq %%r8, %%r8\n" |
|||
/* (r9,r10,r8) += a0 * b2 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += a1 * b1 */ |
|||
"movq %%rbx, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += a2 * b0 */ |
|||
"movq %%rcx, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* Extract l2 */ |
|||
"movq %%r9, 16(%%rsi)\n" |
|||
"xorq %%r9, %%r9\n" |
|||
/* (r10,r8,r9) += a0 * b3 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* Preload a3 */ |
|||
"movq 24(%%rdi), %%r15\n" |
|||
/* (r10,r8,r9) += a1 * b2 */ |
|||
"movq %%rbx, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += a2 * b1 */ |
|||
"movq %%rcx, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += a3 * b0 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r11\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* Extract l3 */ |
|||
"movq %%r10, 24(%%rsi)\n" |
|||
"xorq %%r10, %%r10\n" |
|||
/* (r8,r9,r10) += a1 * b3 */ |
|||
"movq %%rbx, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += a2 * b2 */ |
|||
"movq %%rcx, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += a3 * b1 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* Extract l4 */ |
|||
"movq %%r8, 32(%%rsi)\n" |
|||
"xorq %%r8, %%r8\n" |
|||
/* (r9,r10,r8) += a2 * b3 */ |
|||
"movq %%rcx, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += a3 * b2 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* Extract l5 */ |
|||
"movq %%r9, 40(%%rsi)\n" |
|||
/* (r10,r8) += a3 * b3 */ |
|||
"movq %%r15, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
/* Extract l6 */ |
|||
"movq %%r10, 48(%%rsi)\n" |
|||
/* Extract l7 */ |
|||
"movq %%r8, 56(%%rsi)\n" |
|||
: "+d"(pb) |
|||
: "S"(l), "D"(a->d) |
|||
: "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory"); |
|||
#else |
|||
/* 160 bit accumulator. */ |
|||
uint64_t c0 = 0, c1 = 0; |
|||
uint32_t c2 = 0; |
|||
|
|||
/* l[0..7] = a[0..3] * b[0..3]. */ |
|||
muladd_fast(a->d[0], b->d[0]); |
|||
extract_fast(l[0]); |
|||
muladd(a->d[0], b->d[1]); |
|||
muladd(a->d[1], b->d[0]); |
|||
extract(l[1]); |
|||
muladd(a->d[0], b->d[2]); |
|||
muladd(a->d[1], b->d[1]); |
|||
muladd(a->d[2], b->d[0]); |
|||
extract(l[2]); |
|||
muladd(a->d[0], b->d[3]); |
|||
muladd(a->d[1], b->d[2]); |
|||
muladd(a->d[2], b->d[1]); |
|||
muladd(a->d[3], b->d[0]); |
|||
extract(l[3]); |
|||
muladd(a->d[1], b->d[3]); |
|||
muladd(a->d[2], b->d[2]); |
|||
muladd(a->d[3], b->d[1]); |
|||
extract(l[4]); |
|||
muladd(a->d[2], b->d[3]); |
|||
muladd(a->d[3], b->d[2]); |
|||
extract(l[5]); |
|||
muladd_fast(a->d[3], b->d[3]); |
|||
extract_fast(l[6]); |
|||
VERIFY_CHECK(c1 == 0); |
|||
l[7] = c0; |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) { |
|||
#ifdef USE_ASM_X86_64 |
|||
__asm__ __volatile__( |
|||
/* Preload */ |
|||
"movq 0(%%rdi), %%r11\n" |
|||
"movq 8(%%rdi), %%r12\n" |
|||
"movq 16(%%rdi), %%r13\n" |
|||
"movq 24(%%rdi), %%r14\n" |
|||
/* (rax,rdx) = a0 * a0 */ |
|||
"movq %%r11, %%rax\n" |
|||
"mulq %%r11\n" |
|||
/* Extract l0 */ |
|||
"movq %%rax, 0(%%rsi)\n" |
|||
/* (r8,r9,r10) = (rdx,0) */ |
|||
"movq %%rdx, %%r8\n" |
|||
"xorq %%r9, %%r9\n" |
|||
"xorq %%r10, %%r10\n" |
|||
/* (r8,r9,r10) += 2 * a0 * a1 */ |
|||
"movq %%r11, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* Extract l1 */ |
|||
"movq %%r8, 8(%%rsi)\n" |
|||
"xorq %%r8, %%r8\n" |
|||
/* (r9,r10,r8) += 2 * a0 * a2 */ |
|||
"movq %%r11, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* (r9,r10,r8) += a1 * a1 */ |
|||
"movq %%r12, %%rax\n" |
|||
"mulq %%r12\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* Extract l2 */ |
|||
"movq %%r9, 16(%%rsi)\n" |
|||
"xorq %%r9, %%r9\n" |
|||
/* (r10,r8,r9) += 2 * a0 * a3 */ |
|||
"movq %%r11, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* (r10,r8,r9) += 2 * a1 * a2 */ |
|||
"movq %%r12, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
"adcq $0, %%r9\n" |
|||
/* Extract l3 */ |
|||
"movq %%r10, 24(%%rsi)\n" |
|||
"xorq %%r10, %%r10\n" |
|||
/* (r8,r9,r10) += 2 * a1 * a3 */ |
|||
"movq %%r12, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* (r8,r9,r10) += a2 * a2 */ |
|||
"movq %%r13, %%rax\n" |
|||
"mulq %%r13\n" |
|||
"addq %%rax, %%r8\n" |
|||
"adcq %%rdx, %%r9\n" |
|||
"adcq $0, %%r10\n" |
|||
/* Extract l4 */ |
|||
"movq %%r8, 32(%%rsi)\n" |
|||
"xorq %%r8, %%r8\n" |
|||
/* (r9,r10,r8) += 2 * a2 * a3 */ |
|||
"movq %%r13, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
"addq %%rax, %%r9\n" |
|||
"adcq %%rdx, %%r10\n" |
|||
"adcq $0, %%r8\n" |
|||
/* Extract l5 */ |
|||
"movq %%r9, 40(%%rsi)\n" |
|||
/* (r10,r8) += a3 * a3 */ |
|||
"movq %%r14, %%rax\n" |
|||
"mulq %%r14\n" |
|||
"addq %%rax, %%r10\n" |
|||
"adcq %%rdx, %%r8\n" |
|||
/* Extract l6 */ |
|||
"movq %%r10, 48(%%rsi)\n" |
|||
/* Extract l7 */ |
|||
"movq %%r8, 56(%%rsi)\n" |
|||
: |
|||
: "S"(l), "D"(a->d) |
|||
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory"); |
|||
#else |
|||
/* 160 bit accumulator. */ |
|||
uint64_t c0 = 0, c1 = 0; |
|||
uint32_t c2 = 0; |
|||
|
|||
/* l[0..7] = a[0..3]^2. */ |
|||
muladd_fast(a->d[0], a->d[0]); |
|||
extract_fast(l[0]); |
|||
muladd2(a->d[0], a->d[1]); |
|||
extract(l[1]); |
|||
muladd2(a->d[0], a->d[2]); |
|||
muladd(a->d[1], a->d[1]); |
|||
extract(l[2]); |
|||
muladd2(a->d[0], a->d[3]); |
|||
muladd2(a->d[1], a->d[2]); |
|||
extract(l[3]); |
|||
muladd2(a->d[1], a->d[3]); |
|||
muladd(a->d[2], a->d[2]); |
|||
extract(l[4]); |
|||
muladd2(a->d[2], a->d[3]); |
|||
extract(l[5]); |
|||
muladd_fast(a->d[3], a->d[3]); |
|||
extract_fast(l[6]); |
|||
VERIFY_CHECK(c1 == 0); |
|||
l[7] = c0; |
|||
#endif |
|||
} |
|||
|
|||
#undef sumadd |
|||
#undef sumadd_fast |
|||
#undef muladd |
|||
#undef muladd_fast |
|||
#undef muladd2 |
|||
#undef extract |
|||
#undef extract_fast |
|||
|
|||
static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
uint64_t l[8]; |
|||
secp256k1_scalar_mul_512(l, a, b); |
|||
secp256k1_scalar_reduce_512(r, l); |
|||
} |
|||
|
|||
static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n) { |
|||
int ret; |
|||
VERIFY_CHECK(n > 0); |
|||
VERIFY_CHECK(n < 16); |
|||
ret = r->d[0] & ((1 << n) - 1); |
|||
r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n)); |
|||
r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n)); |
|||
r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n)); |
|||
r->d[3] = (r->d[3] >> n); |
|||
return ret; |
|||
} |
|||
|
|||
static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { |
|||
uint64_t l[8]; |
|||
secp256k1_scalar_sqr_512(l, a); |
|||
secp256k1_scalar_reduce_512(r, l); |
|||
} |
|||
|
|||
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { |
|||
r1->d[0] = a->d[0]; |
|||
r1->d[1] = a->d[1]; |
|||
r1->d[2] = 0; |
|||
r1->d[3] = 0; |
|||
r2->d[0] = a->d[2]; |
|||
r2->d[1] = a->d[3]; |
|||
r2->d[2] = 0; |
|||
r2->d[3] = 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { |
|||
uint64_t l[8]; |
|||
unsigned int shiftlimbs; |
|||
unsigned int shiftlow; |
|||
unsigned int shifthigh; |
|||
VERIFY_CHECK(shift >= 256); |
|||
secp256k1_scalar_mul_512(l, a, b); |
|||
shiftlimbs = shift >> 6; |
|||
shiftlow = shift & 0x3F; |
|||
shifthigh = 64 - shiftlow; |
|||
r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; |
|||
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); |
|||
} |
|||
|
|||
#endif |
@ -0,0 +1,19 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_REPR_ |
|||
#define _SECP256K1_SCALAR_REPR_ |
|||
|
|||
#include <stdint.h> |
|||
|
|||
/** A scalar modulo the group order of the secp256k1 curve. */ |
|||
typedef struct { |
|||
uint32_t d[8]; |
|||
} secp256k1_scalar_t; |
|||
|
|||
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} |
|||
|
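/* Illustrative note (added; not part of the upstream header): the macro takes
 * the eight 32-bit words of a scalar most-significant first, but stores them
 * with d[0] as the least-significant limb.  The hypothetical constant below
 * encodes the scalar 1, so example_scalar_one.d[0] == 1 and every other limb
 * is zero. */
static const secp256k1_scalar_t example_scalar_one = SECP256K1_SCALAR_CONST(
    0, 0, 0, 0, 0, 0, 0, 1
);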
|||
#endif |
@ -0,0 +1,721 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ |
|||
#define _SECP256K1_SCALAR_REPR_IMPL_H_ |
|||
|
|||
/* Limbs of the secp256k1 order. */ |
|||
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL) |
|||
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) |
|||
#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL) |
|||
#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL) |
|||
#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL) |
|||
#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL) |
|||
#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL) |
|||
#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL) |
|||
|
|||
/* Limbs of 2^256 minus the secp256k1 order. */ |
|||
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) |
|||
#define SECP256K1_N_C_1 (~SECP256K1_N_1) |
|||
#define SECP256K1_N_C_2 (~SECP256K1_N_2) |
|||
#define SECP256K1_N_C_3 (~SECP256K1_N_3) |
|||
#define SECP256K1_N_C_4 (1) |
|||
|
|||
/* Limbs of half the secp256k1 order. */ |
|||
#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL) |
|||
#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL) |
|||
#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL) |
|||
#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL) |
|||
#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL) |
|||
#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL) |
|||
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) |
|||
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) { |
|||
r->d[0] = 0; |
|||
r->d[1] = 0; |
|||
r->d[2] = 0; |
|||
r->d[3] = 0; |
|||
r->d[4] = 0; |
|||
r->d[5] = 0; |
|||
r->d[6] = 0; |
|||
r->d[7] = 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) { |
|||
r->d[0] = v; |
|||
r->d[1] = 0; |
|||
r->d[2] = 0; |
|||
r->d[3] = 0; |
|||
r->d[4] = 0; |
|||
r->d[5] = 0; |
|||
r->d[6] = 0; |
|||
r->d[7] = 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { |
|||
VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); |
|||
return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); |
|||
} |
|||
|
|||
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { |
|||
VERIFY_CHECK(count < 32); |
|||
VERIFY_CHECK(offset + count <= 256); |
|||
if ((offset + count - 1) >> 5 == offset >> 5) { |
|||
return secp256k1_scalar_get_bits(a, offset, count); |
|||
} else { |
|||
VERIFY_CHECK((offset >> 5) + 1 < 8); |
|||
return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); |
|||
} |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { |
|||
int yes = 0; |
|||
int no = 0; |
|||
no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ |
|||
no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */ |
|||
no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */ |
|||
no |= (a->d[4] < SECP256K1_N_4); |
|||
yes |= (a->d[4] > SECP256K1_N_4) & ~no; |
|||
no |= (a->d[3] < SECP256K1_N_3) & ~yes; |
|||
yes |= (a->d[3] > SECP256K1_N_3) & ~no; |
|||
no |= (a->d[2] < SECP256K1_N_2) & ~yes; |
|||
yes |= (a->d[2] > SECP256K1_N_2) & ~no; |
|||
no |= (a->d[1] < SECP256K1_N_1) & ~yes; |
|||
yes |= (a->d[1] > SECP256K1_N_1) & ~no; |
|||
yes |= (a->d[0] >= SECP256K1_N_0) & ~no; |
|||
return yes; |
|||
} |
|||
|
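/* Note added for exposition: the yes/no ladder above walks the limbs from the
 * most significant end without branching on their values; 'yes' ends up 1
 * exactly when the scalar is >= the group order, which is what the reduction
 * below consumes as its 'overflow' argument. */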
|||
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, uint32_t overflow) { |
|||
uint64_t t; |
|||
VERIFY_CHECK(overflow <= 1); |
|||
t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; |
|||
r->d[0] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1; |
|||
r->d[1] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2; |
|||
r->d[2] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3; |
|||
r->d[3] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4; |
|||
r->d[4] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[5]; |
|||
r->d[5] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[6]; |
|||
r->d[6] = t & 0xFFFFFFFFUL; t >>= 32; |
|||
t += (uint64_t)r->d[7]; |
|||
r->d[7] = t & 0xFFFFFFFFUL; |
|||
return overflow; |
|||
} |
|||
|
|||
static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
int overflow; |
|||
uint64_t t = (uint64_t)a->d[0] + b->d[0]; |
|||
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[1] + b->d[1]; |
|||
r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[2] + b->d[2]; |
|||
r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[3] + b->d[3]; |
|||
r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[4] + b->d[4]; |
|||
r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[5] + b->d[5]; |
|||
r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[6] + b->d[6]; |
|||
r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)a->d[7] + b->d[7]; |
|||
r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
overflow = t + secp256k1_scalar_check_overflow(r); |
|||
VERIFY_CHECK(overflow == 0 || overflow == 1); |
|||
secp256k1_scalar_reduce(r, overflow); |
|||
return overflow; |
|||
} |
|||
|
|||
static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag) { |
|||
uint64_t t; |
|||
VERIFY_CHECK(bit < 256); |
|||
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ |
|||
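/* Added note: when flag == 0 the adjustment above makes (bit >> 5) at least 8,
 * so none of the (bit >> 5) == k selectors below match and every addition adds
 * 0; the function therefore stays branch-free for both values of flag. */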
t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F)); |
|||
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F)); |
|||
r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F)); |
|||
r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F)); |
|||
r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F)); |
|||
r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F)); |
|||
r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F)); |
|||
r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; |
|||
t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); |
|||
r->d[7] = t & 0xFFFFFFFFULL; |
|||
#ifdef VERIFY |
|||
VERIFY_CHECK((t >> 32) == 0); |
|||
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); |
|||
#endif |
|||
} |
|||
|
|||
static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) { |
|||
int over; |
|||
r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; |
|||
r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24; |
|||
r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24; |
|||
r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24; |
|||
r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24; |
|||
r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; |
|||
r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; |
|||
r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; |
|||
over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); |
|||
if (overflow) { |
|||
*overflow = over; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) { |
|||
bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7]; |
|||
bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; |
|||
bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; |
|||
bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4]; |
|||
bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3]; |
|||
bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2]; |
|||
bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1]; |
|||
bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar_t *a) { |
|||
return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; |
|||
} |
|||
|
|||
static void secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { |
|||
uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0); |
|||
uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; |
|||
r->d[0] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; |
|||
r->d[1] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[2]) + SECP256K1_N_2; |
|||
r->d[2] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[3]) + SECP256K1_N_3; |
|||
r->d[3] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[4]) + SECP256K1_N_4; |
|||
r->d[4] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[5]) + SECP256K1_N_5; |
|||
r->d[5] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[6]) + SECP256K1_N_6; |
|||
r->d[6] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(~a->d[7]) + SECP256K1_N_7; |
|||
r->d[7] = t & nonzero; |
|||
} |
|||
|
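/* Note added for exposition: the sum above is ~a + n + 1 == 2^256 + (n - a),
 * whose low 256 bits are n - a, i.e. the additive inverse modulo the order;
 * the 'nonzero' mask then forces the canonical result 0 when a == 0. */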
|||
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a) { |
|||
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; |
|||
} |
|||
|
|||
static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { |
|||
int yes = 0; |
|||
int no = 0; |
|||
no |= (a->d[7] < SECP256K1_N_H_7); |
|||
yes |= (a->d[7] > SECP256K1_N_H_7) & ~no; |
|||
no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */ |
|||
no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */ |
|||
no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */ |
|||
no |= (a->d[3] < SECP256K1_N_H_3) & ~yes; |
|||
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; |
|||
no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; |
|||
yes |= (a->d[2] > SECP256K1_N_H_2) & ~no; |
|||
no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; |
|||
yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; |
|||
yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; |
|||
return yes; |
|||
} |
|||
|
|||
static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *r, int flag) { |
|||
/* If we are flag = 0, mask = 00...00 and this is a no-op;
|
|||
* if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ |
|||
uint32_t mask = !flag - 1; |
|||
uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0); |
|||
uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); |
|||
r->d[0] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); |
|||
r->d[1] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); |
|||
r->d[2] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); |
|||
r->d[3] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask); |
|||
r->d[4] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask); |
|||
r->d[5] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask); |
|||
r->d[6] = t & nonzero; t >>= 32; |
|||
t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask); |
|||
r->d[7] = t & nonzero; |
|||
return 2 * (mask == 0) - 1; |
|||
} |
|||
|
|||
|
|||
/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ |
|||
|
|||
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define muladd(a,b) { \ |
|||
uint32_t tl, th; \ |
|||
{ \ |
|||
uint64_t t = (uint64_t)a * b; \ |
|||
th = t >> 32; /* at most 0xFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
c0 += tl; /* overflow is handled on the next line */ \ |
|||
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ |
|||
c1 += th; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ |
|||
} |
|||
|
|||
/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ |
|||
#define muladd_fast(a,b) { \ |
|||
uint32_t tl, th; \ |
|||
{ \ |
|||
uint64_t t = (uint64_t)a * b; \ |
|||
th = t >> 32; /* at most 0xFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
c0 += tl; /* overflow is handled on the next line */ \ |
|||
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ |
|||
c1 += th; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK(c1 >= th); \ |
|||
} |
|||
|
|||
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define muladd2(a,b) { \ |
|||
uint32_t tl, th, th2, tl2; \ |
|||
{ \ |
|||
uint64_t t = (uint64_t)a * b; \ |
|||
th = t >> 32; /* at most 0xFFFFFFFE */ \ |
|||
tl = t; \ |
|||
} \ |
|||
th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \ |
|||
c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ |
|||
tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \ |
|||
th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ |
|||
c0 += tl2; /* overflow is handled on the next line */ \ |
|||
th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \ |
|||
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ |
|||
c1 += th2; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ |
|||
} |
|||
|
|||
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ |
|||
#define sumadd(a) { \ |
|||
unsigned int over; \ |
|||
c0 += (a); /* overflow is handled on the next line */ \ |
|||
over = (c0 < (a)) ? 1 : 0; \ |
|||
c1 += over; /* overflow is handled on the next line */ \ |
|||
c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \ |
|||
} |
|||
|
|||
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */ |
|||
#define sumadd_fast(a) { \ |
|||
c0 += (a); /* overflow is handled on the next line */ \ |
|||
c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ |
|||
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ |
|||
VERIFY_CHECK(c2 == 0); \ |
|||
} |
|||
|
|||
/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */ |
|||
#define extract(n) { \ |
|||
(n) = c0; \ |
|||
c0 = c1; \ |
|||
c1 = c2; \ |
|||
c2 = 0; \ |
|||
} |
|||
|
|||
/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is required to be zero. */ |
|||
#define extract_fast(n) { \ |
|||
(n) = c0; \ |
|||
c0 = c1; \ |
|||
c1 = 0; \ |
|||
VERIFY_CHECK(c2 == 0); \ |
|||
} |
|||
|
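/* Worked illustration (added for exposition): starting from c0 = c1 = c2 = 0,
 * the pair
 *
 *     muladd_fast(a, b);    leaves c0 = low 32 bits and c1 = high 32 bits of a*b
 *     extract_fast(l[0]);   stores c0 into l[0] and shifts the accumulator down
 *
 * so l[0] ends up with the low word of the product and c0 carries the high
 * word into the next column, which is how the routines below build their
 * 512-bit intermediate results one 32-bit limb at a time. */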
|||
static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l) { |
|||
uint64_t c; |
|||
uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; |
|||
uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; |
|||
uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8; |
|||
|
|||
/* 96 bit accumulator. */ |
|||
uint32_t c0, c1, c2; |
|||
|
|||
/* Reduce 512 bits into 385. */ |
|||
/* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */ |
|||
c0 = l[0]; c1 = 0; c2 = 0; |
|||
muladd_fast(n0, SECP256K1_N_C_0); |
|||
extract_fast(m0); |
|||
sumadd_fast(l[1]); |
|||
muladd(n1, SECP256K1_N_C_0); |
|||
muladd(n0, SECP256K1_N_C_1); |
|||
extract(m1); |
|||
sumadd(l[2]); |
|||
muladd(n2, SECP256K1_N_C_0); |
|||
muladd(n1, SECP256K1_N_C_1); |
|||
muladd(n0, SECP256K1_N_C_2); |
|||
extract(m2); |
|||
sumadd(l[3]); |
|||
muladd(n3, SECP256K1_N_C_0); |
|||
muladd(n2, SECP256K1_N_C_1); |
|||
muladd(n1, SECP256K1_N_C_2); |
|||
muladd(n0, SECP256K1_N_C_3); |
|||
extract(m3); |
|||
sumadd(l[4]); |
|||
muladd(n4, SECP256K1_N_C_0); |
|||
muladd(n3, SECP256K1_N_C_1); |
|||
muladd(n2, SECP256K1_N_C_2); |
|||
muladd(n1, SECP256K1_N_C_3); |
|||
sumadd(n0); |
|||
extract(m4); |
|||
sumadd(l[5]); |
|||
muladd(n5, SECP256K1_N_C_0); |
|||
muladd(n4, SECP256K1_N_C_1); |
|||
muladd(n3, SECP256K1_N_C_2); |
|||
muladd(n2, SECP256K1_N_C_3); |
|||
sumadd(n1); |
|||
extract(m5); |
|||
sumadd(l[6]); |
|||
muladd(n6, SECP256K1_N_C_0); |
|||
muladd(n5, SECP256K1_N_C_1); |
|||
muladd(n4, SECP256K1_N_C_2); |
|||
muladd(n3, SECP256K1_N_C_3); |
|||
sumadd(n2); |
|||
extract(m6); |
|||
sumadd(l[7]); |
|||
muladd(n7, SECP256K1_N_C_0); |
|||
muladd(n6, SECP256K1_N_C_1); |
|||
muladd(n5, SECP256K1_N_C_2); |
|||
muladd(n4, SECP256K1_N_C_3); |
|||
sumadd(n3); |
|||
extract(m7); |
|||
muladd(n7, SECP256K1_N_C_1); |
|||
muladd(n6, SECP256K1_N_C_2); |
|||
muladd(n5, SECP256K1_N_C_3); |
|||
sumadd(n4); |
|||
extract(m8); |
|||
muladd(n7, SECP256K1_N_C_2); |
|||
muladd(n6, SECP256K1_N_C_3); |
|||
sumadd(n5); |
|||
extract(m9); |
|||
muladd(n7, SECP256K1_N_C_3); |
|||
sumadd(n6); |
|||
extract(m10); |
|||
sumadd_fast(n7); |
|||
extract_fast(m11); |
|||
VERIFY_CHECK(c0 <= 1); |
|||
m12 = c0; |
|||
|
|||
/* Reduce 385 bits into 258. */ |
|||
/* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */ |
|||
c0 = m0; c1 = 0; c2 = 0; |
|||
muladd_fast(m8, SECP256K1_N_C_0); |
|||
extract_fast(p0); |
|||
sumadd_fast(m1); |
|||
muladd(m9, SECP256K1_N_C_0); |
|||
muladd(m8, SECP256K1_N_C_1); |
|||
extract(p1); |
|||
sumadd(m2); |
|||
muladd(m10, SECP256K1_N_C_0); |
|||
muladd(m9, SECP256K1_N_C_1); |
|||
muladd(m8, SECP256K1_N_C_2); |
|||
extract(p2); |
|||
sumadd(m3); |
|||
muladd(m11, SECP256K1_N_C_0); |
|||
muladd(m10, SECP256K1_N_C_1); |
|||
muladd(m9, SECP256K1_N_C_2); |
|||
muladd(m8, SECP256K1_N_C_3); |
|||
extract(p3); |
|||
sumadd(m4); |
|||
muladd(m12, SECP256K1_N_C_0); |
|||
muladd(m11, SECP256K1_N_C_1); |
|||
muladd(m10, SECP256K1_N_C_2); |
|||
muladd(m9, SECP256K1_N_C_3); |
|||
sumadd(m8); |
|||
extract(p4); |
|||
sumadd(m5); |
|||
muladd(m12, SECP256K1_N_C_1); |
|||
muladd(m11, SECP256K1_N_C_2); |
|||
muladd(m10, SECP256K1_N_C_3); |
|||
sumadd(m9); |
|||
extract(p5); |
|||
sumadd(m6); |
|||
muladd(m12, SECP256K1_N_C_2); |
|||
muladd(m11, SECP256K1_N_C_3); |
|||
sumadd(m10); |
|||
extract(p6); |
|||
sumadd_fast(m7); |
|||
muladd_fast(m12, SECP256K1_N_C_3); |
|||
sumadd_fast(m11); |
|||
extract_fast(p7); |
|||
p8 = c0 + m12; |
|||
VERIFY_CHECK(p8 <= 2); |
|||
|
|||
/* Reduce 258 bits into 256. */ |
|||
/* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */ |
|||
c = p0 + (uint64_t)SECP256K1_N_C_0 * p8; |
|||
r->d[0] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p1 + (uint64_t)SECP256K1_N_C_1 * p8; |
|||
r->d[1] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p2 + (uint64_t)SECP256K1_N_C_2 * p8; |
|||
r->d[2] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p3 + (uint64_t)SECP256K1_N_C_3 * p8; |
|||
r->d[3] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p4 + (uint64_t)p8; |
|||
r->d[4] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p5; |
|||
r->d[5] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p6; |
|||
r->d[6] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
c += p7; |
|||
r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; |
|||
|
|||
/* Final reduction of r. */ |
|||
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); |
|||
} |
|||
|
|||
static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
/* 96 bit accumulator. */ |
|||
uint32_t c0 = 0, c1 = 0, c2 = 0; |
|||
|
|||
/* l[0..15] = a[0..7] * b[0..7]. */ |
|||
muladd_fast(a->d[0], b->d[0]); |
|||
extract_fast(l[0]); |
|||
muladd(a->d[0], b->d[1]); |
|||
muladd(a->d[1], b->d[0]); |
|||
extract(l[1]); |
|||
muladd(a->d[0], b->d[2]); |
|||
muladd(a->d[1], b->d[1]); |
|||
muladd(a->d[2], b->d[0]); |
|||
extract(l[2]); |
|||
muladd(a->d[0], b->d[3]); |
|||
muladd(a->d[1], b->d[2]); |
|||
muladd(a->d[2], b->d[1]); |
|||
muladd(a->d[3], b->d[0]); |
|||
extract(l[3]); |
|||
muladd(a->d[0], b->d[4]); |
|||
muladd(a->d[1], b->d[3]); |
|||
muladd(a->d[2], b->d[2]); |
|||
muladd(a->d[3], b->d[1]); |
|||
muladd(a->d[4], b->d[0]); |
|||
extract(l[4]); |
|||
muladd(a->d[0], b->d[5]); |
|||
muladd(a->d[1], b->d[4]); |
|||
muladd(a->d[2], b->d[3]); |
|||
muladd(a->d[3], b->d[2]); |
|||
muladd(a->d[4], b->d[1]); |
|||
muladd(a->d[5], b->d[0]); |
|||
extract(l[5]); |
|||
muladd(a->d[0], b->d[6]); |
|||
muladd(a->d[1], b->d[5]); |
|||
muladd(a->d[2], b->d[4]); |
|||
muladd(a->d[3], b->d[3]); |
|||
muladd(a->d[4], b->d[2]); |
|||
muladd(a->d[5], b->d[1]); |
|||
muladd(a->d[6], b->d[0]); |
|||
extract(l[6]); |
|||
muladd(a->d[0], b->d[7]); |
|||
muladd(a->d[1], b->d[6]); |
|||
muladd(a->d[2], b->d[5]); |
|||
muladd(a->d[3], b->d[4]); |
|||
muladd(a->d[4], b->d[3]); |
|||
muladd(a->d[5], b->d[2]); |
|||
muladd(a->d[6], b->d[1]); |
|||
muladd(a->d[7], b->d[0]); |
|||
extract(l[7]); |
|||
muladd(a->d[1], b->d[7]); |
|||
muladd(a->d[2], b->d[6]); |
|||
muladd(a->d[3], b->d[5]); |
|||
muladd(a->d[4], b->d[4]); |
|||
muladd(a->d[5], b->d[3]); |
|||
muladd(a->d[6], b->d[2]); |
|||
muladd(a->d[7], b->d[1]); |
|||
extract(l[8]); |
|||
muladd(a->d[2], b->d[7]); |
|||
muladd(a->d[3], b->d[6]); |
|||
muladd(a->d[4], b->d[5]); |
|||
muladd(a->d[5], b->d[4]); |
|||
muladd(a->d[6], b->d[3]); |
|||
muladd(a->d[7], b->d[2]); |
|||
extract(l[9]); |
|||
muladd(a->d[3], b->d[7]); |
|||
muladd(a->d[4], b->d[6]); |
|||
muladd(a->d[5], b->d[5]); |
|||
muladd(a->d[6], b->d[4]); |
|||
muladd(a->d[7], b->d[3]); |
|||
extract(l[10]); |
|||
muladd(a->d[4], b->d[7]); |
|||
muladd(a->d[5], b->d[6]); |
|||
muladd(a->d[6], b->d[5]); |
|||
muladd(a->d[7], b->d[4]); |
|||
extract(l[11]); |
|||
muladd(a->d[5], b->d[7]); |
|||
muladd(a->d[6], b->d[6]); |
|||
muladd(a->d[7], b->d[5]); |
|||
extract(l[12]); |
|||
muladd(a->d[6], b->d[7]); |
|||
muladd(a->d[7], b->d[6]); |
|||
extract(l[13]); |
|||
muladd_fast(a->d[7], b->d[7]); |
|||
extract_fast(l[14]); |
|||
VERIFY_CHECK(c1 == 0); |
|||
l[15] = c0; |
|||
} |
|||
|
|||
static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar_t *a) { |
|||
/* 96 bit accumulator. */ |
|||
uint32_t c0 = 0, c1 = 0, c2 = 0; |
|||
|
|||
/* l[0..15] = a[0..7]^2. */ |
|||
muladd_fast(a->d[0], a->d[0]); |
|||
extract_fast(l[0]); |
|||
muladd2(a->d[0], a->d[1]); |
|||
extract(l[1]); |
|||
muladd2(a->d[0], a->d[2]); |
|||
muladd(a->d[1], a->d[1]); |
|||
extract(l[2]); |
|||
muladd2(a->d[0], a->d[3]); |
|||
muladd2(a->d[1], a->d[2]); |
|||
extract(l[3]); |
|||
muladd2(a->d[0], a->d[4]); |
|||
muladd2(a->d[1], a->d[3]); |
|||
muladd(a->d[2], a->d[2]); |
|||
extract(l[4]); |
|||
muladd2(a->d[0], a->d[5]); |
|||
muladd2(a->d[1], a->d[4]); |
|||
muladd2(a->d[2], a->d[3]); |
|||
extract(l[5]); |
|||
muladd2(a->d[0], a->d[6]); |
|||
muladd2(a->d[1], a->d[5]); |
|||
muladd2(a->d[2], a->d[4]); |
|||
muladd(a->d[3], a->d[3]); |
|||
extract(l[6]); |
|||
muladd2(a->d[0], a->d[7]); |
|||
muladd2(a->d[1], a->d[6]); |
|||
muladd2(a->d[2], a->d[5]); |
|||
muladd2(a->d[3], a->d[4]); |
|||
extract(l[7]); |
|||
muladd2(a->d[1], a->d[7]); |
|||
muladd2(a->d[2], a->d[6]); |
|||
muladd2(a->d[3], a->d[5]); |
|||
muladd(a->d[4], a->d[4]); |
|||
extract(l[8]); |
|||
muladd2(a->d[2], a->d[7]); |
|||
muladd2(a->d[3], a->d[6]); |
|||
muladd2(a->d[4], a->d[5]); |
|||
extract(l[9]); |
|||
muladd2(a->d[3], a->d[7]); |
|||
muladd2(a->d[4], a->d[6]); |
|||
muladd(a->d[5], a->d[5]); |
|||
extract(l[10]); |
|||
muladd2(a->d[4], a->d[7]); |
|||
muladd2(a->d[5], a->d[6]); |
|||
extract(l[11]); |
|||
muladd2(a->d[5], a->d[7]); |
|||
muladd(a->d[6], a->d[6]); |
|||
extract(l[12]); |
|||
muladd2(a->d[6], a->d[7]); |
|||
extract(l[13]); |
|||
muladd_fast(a->d[7], a->d[7]); |
|||
extract_fast(l[14]); |
|||
VERIFY_CHECK(c1 == 0); |
|||
l[15] = c0; |
|||
} |
|||
|
|||
#undef sumadd |
|||
#undef sumadd_fast |
|||
#undef muladd |
|||
#undef muladd_fast |
|||
#undef muladd2 |
|||
#undef extract |
|||
#undef extract_fast |
|||
|
|||
static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
uint32_t l[16]; |
|||
secp256k1_scalar_mul_512(l, a, b); |
|||
secp256k1_scalar_reduce_512(r, l); |
|||
} |
|||
|
|||
static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n) { |
|||
int ret; |
|||
VERIFY_CHECK(n > 0); |
|||
VERIFY_CHECK(n < 16); |
|||
ret = r->d[0] & ((1 << n) - 1); |
|||
r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n)); |
|||
r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n)); |
|||
r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n)); |
|||
r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n)); |
|||
r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n)); |
|||
r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n)); |
|||
r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n)); |
|||
r->d[7] = (r->d[7] >> n); |
|||
return ret; |
|||
} |
|||
|
|||
static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { |
|||
uint32_t l[16]; |
|||
secp256k1_scalar_sqr_512(l, a); |
|||
secp256k1_scalar_reduce_512(r, l); |
|||
} |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { |
|||
r1->d[0] = a->d[0]; |
|||
r1->d[1] = a->d[1]; |
|||
r1->d[2] = a->d[2]; |
|||
r1->d[3] = a->d[3]; |
|||
r1->d[4] = 0; |
|||
r1->d[5] = 0; |
|||
r1->d[6] = 0; |
|||
r1->d[7] = 0; |
|||
r2->d[0] = a->d[4]; |
|||
r2->d[1] = a->d[5]; |
|||
r2->d[2] = a->d[6]; |
|||
r2->d[3] = a->d[7]; |
|||
r2->d[4] = 0; |
|||
r2->d[5] = 0; |
|||
r2->d[6] = 0; |
|||
r2->d[7] = 0; |
|||
} |
|||
#endif |
|||
|
|||
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { |
|||
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; |
|||
} |
|||
|
|||
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { |
|||
uint32_t l[16]; |
|||
unsigned int shiftlimbs; |
|||
unsigned int shiftlow; |
|||
unsigned int shifthigh; |
|||
VERIFY_CHECK(shift >= 256); |
|||
secp256k1_scalar_mul_512(l, a, b); |
|||
shiftlimbs = shift >> 5; |
|||
shiftlow = shift & 0x1F; |
|||
shifthigh = 32 - shiftlow; |
|||
r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; |
|||
r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; |
|||
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); |
|||
} |
|||
|
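/* Note added for exposition: the routine above computes round((a * b) / 2^shift);
 * the final secp256k1_scalar_cadd_bit call folds bit (shift - 1) of the full
 * product back in, which is the rounding step the endomorphism decomposition
 * in scalar_impl.h relies on. */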
|||
#endif |
@ -0,0 +1,337 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_SCALAR_IMPL_H_ |
|||
#define _SECP256K1_SCALAR_IMPL_H_ |
|||
|
|||
#include <string.h> |
|||
|
|||
#include "group.h" |
|||
#include "scalar.h" |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#if defined(USE_SCALAR_4X64) |
|||
#include "scalar_4x64_impl.h" |
|||
#elif defined(USE_SCALAR_8X32) |
|||
#include "scalar_8x32_impl.h" |
|||
#else |
|||
#error "Please select scalar implementation" |
|||
#endif |
|||
|
|||
#ifndef USE_NUM_NONE |
|||
static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a) { |
|||
unsigned char c[32]; |
|||
secp256k1_scalar_get_b32(c, a); |
|||
secp256k1_num_set_bin(r, c, 32); |
|||
} |
|||
|
|||
/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ |
|||
static void secp256k1_scalar_order_get_num(secp256k1_num_t *r) { |
|||
static const unsigned char order[32] = { |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, |
|||
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, |
|||
0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, |
|||
0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 |
|||
}; |
|||
secp256k1_num_set_bin(r, order, 32); |
|||
} |
|||
#endif |
|||
|
|||
static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { |
|||
secp256k1_scalar_t *t; |
|||
int i; |
|||
/* First compute x ^ (2^N - 1) for some values of N. */ |
|||
secp256k1_scalar_t x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127; |
|||
|
|||
secp256k1_scalar_sqr(&x2, x); |
|||
secp256k1_scalar_mul(&x2, &x2, x); |
|||
|
|||
secp256k1_scalar_sqr(&x3, &x2); |
|||
secp256k1_scalar_mul(&x3, &x3, x); |
|||
|
|||
secp256k1_scalar_sqr(&x4, &x3); |
|||
secp256k1_scalar_mul(&x4, &x4, x); |
|||
|
|||
secp256k1_scalar_sqr(&x6, &x4); |
|||
secp256k1_scalar_sqr(&x6, &x6); |
|||
secp256k1_scalar_mul(&x6, &x6, &x2); |
|||
|
|||
secp256k1_scalar_sqr(&x7, &x6); |
|||
secp256k1_scalar_mul(&x7, &x7, x); |
|||
|
|||
secp256k1_scalar_sqr(&x8, &x7); |
|||
secp256k1_scalar_mul(&x8, &x8, x); |
|||
|
|||
secp256k1_scalar_sqr(&x15, &x8); |
|||
for (i = 0; i < 6; i++) { |
|||
secp256k1_scalar_sqr(&x15, &x15); |
|||
} |
|||
secp256k1_scalar_mul(&x15, &x15, &x7); |
|||
|
|||
secp256k1_scalar_sqr(&x30, &x15); |
|||
for (i = 0; i < 14; i++) { |
|||
secp256k1_scalar_sqr(&x30, &x30); |
|||
} |
|||
secp256k1_scalar_mul(&x30, &x30, &x15); |
|||
|
|||
secp256k1_scalar_sqr(&x60, &x30); |
|||
for (i = 0; i < 29; i++) { |
|||
secp256k1_scalar_sqr(&x60, &x60); |
|||
} |
|||
secp256k1_scalar_mul(&x60, &x60, &x30); |
|||
|
|||
secp256k1_scalar_sqr(&x120, &x60); |
|||
for (i = 0; i < 59; i++) { |
|||
secp256k1_scalar_sqr(&x120, &x120); |
|||
} |
|||
secp256k1_scalar_mul(&x120, &x120, &x60); |
|||
|
|||
secp256k1_scalar_sqr(&x127, &x120); |
|||
for (i = 0; i < 6; i++) { |
|||
secp256k1_scalar_sqr(&x127, &x127); |
|||
} |
|||
secp256k1_scalar_mul(&x127, &x127, &x7); |
|||
|
|||
/* Then accumulate the final result (t starts at x127). */ |
|||
t = &x127; |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 4; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 4; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 3; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 4; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 5; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 4; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 5; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x4); /* 1111 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 3; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 4; i++) { /* 000 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 10; i++) { /* 0000000 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 4; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x3); /* 111 */ |
|||
for (i = 0; i < 9; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 3; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 3; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 5; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x4); /* 1111 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 5; i++) { /* 000 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 4; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 2; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 8; i++) { /* 000000 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 3; i++) { /* 0 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, &x2); /* 11 */ |
|||
for (i = 0; i < 3; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 6; i++) { /* 00000 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(t, t, x); /* 1 */ |
|||
for (i = 0; i < 8; i++) { /* 00 */ |
|||
secp256k1_scalar_sqr(t, t); |
|||
} |
|||
secp256k1_scalar_mul(r, t, &x6); /* 111111 */ |
|||
} |
|||
|
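/* Hedged sketch (added; not upstream code): because the routine above is an
 * exponentiation-based (Fermat) inversion modulo the group order, a simple
 * self-check is that x * inverse(x) == 1 for any non-zero x.  The helper name
 * below is made up for illustration; it only uses primitives already declared
 * in scalar.h. */
static int example_check_scalar_inverse(const secp256k1_scalar_t *x) {
    secp256k1_scalar_t xinv, prod;
    VERIFY_CHECK(!secp256k1_scalar_is_zero(x));
    secp256k1_scalar_inverse(&xinv, x);
    secp256k1_scalar_mul(&prod, x, &xinv);
    return secp256k1_scalar_is_one(&prod); /* expected to be 1 */
}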
|||
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar_t *a) { |
|||
/* d[0] is present and is the lowest word for all representations */ |
|||
return !(a->d[0] & 1); |
|||
} |
|||
|
|||
static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { |
|||
#if defined(USE_SCALAR_INV_BUILTIN) |
|||
secp256k1_scalar_inverse(r, x); |
|||
#elif defined(USE_SCALAR_INV_NUM) |
|||
unsigned char b[32]; |
|||
secp256k1_num_t n, m; |
|||
secp256k1_scalar_t t = *x; |
|||
secp256k1_scalar_get_b32(b, &t); |
|||
secp256k1_num_set_bin(&n, b, 32); |
|||
secp256k1_scalar_order_get_num(&m); |
|||
secp256k1_num_mod_inverse(&n, &n, &m); |
|||
secp256k1_num_get_bin(b, 32, &n); |
|||
secp256k1_scalar_set_b32(r, b, NULL); |
|||
/* Verify that the inverse was computed correctly, without GMP code. */ |
|||
secp256k1_scalar_mul(&t, &t, r); |
|||
CHECK(secp256k1_scalar_is_one(&t)); |
|||
#else |
|||
#error "Please select scalar inverse implementation" |
|||
#endif |
|||
} |
|||
|
|||
#ifdef USE_ENDOMORPHISM |
|||
/**
|
|||
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where |
|||
* lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, |
|||
* 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72} |
|||
* |
|||
* "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm |
|||
* (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 |
|||
* and k2 have a small size. |
|||
* It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are: |
|||
* |
|||
* - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} |
|||
* - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3} |
|||
* - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8} |
|||
* - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} |
|||
* |
|||
* The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives |
|||
* k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and |
|||
* compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2. |
|||
* |
|||
* g1, g2 are precomputed constants used to replace division with a rounded multiplication |
|||
* when decomposing the scalar for an endomorphism-based point multiplication. |
|||
* |
|||
* The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve |
|||
* Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5. |
|||
* |
|||
* The derivation is described in the paper "Efficient Software Implementation of Public-Key |
|||
* Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez), |
|||
* Section 4.3 (here we use a somewhat higher-precision estimate): |
|||
* d = a1*b2 - b1*a2 |
|||
* g1 = round((2^272)*b2/d) |
|||
* g2 = round((2^272)*b1/d) |
|||
* |
|||
* (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found |
|||
* as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). |
|||
* |
|||
* The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order). |
|||
*/ |
|||
|
|||
static void secp256k1_scalar_split_lambda(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { |
|||
secp256k1_scalar_t c1, c2; |
|||
static const secp256k1_scalar_t minus_lambda = SECP256K1_SCALAR_CONST( |
|||
0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL, |
|||
0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL |
|||
); |
|||
static const secp256k1_scalar_t minus_b1 = SECP256K1_SCALAR_CONST( |
|||
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, |
|||
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL |
|||
); |
|||
static const secp256k1_scalar_t minus_b2 = SECP256K1_SCALAR_CONST( |
|||
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, |
|||
0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL |
|||
); |
|||
static const secp256k1_scalar_t g1 = SECP256K1_SCALAR_CONST( |
|||
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL, |
|||
0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL |
|||
); |
|||
static const secp256k1_scalar_t g2 = SECP256K1_SCALAR_CONST( |
|||
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL, |
|||
0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL |
|||
); |
|||
VERIFY_CHECK(r1 != a); |
|||
VERIFY_CHECK(r2 != a); |
|||
/* these _var calls are constant time since the shift amount is constant */ |
|||
secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272); |
|||
secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272); |
|||
secp256k1_scalar_mul(&c1, &c1, &minus_b1); |
|||
secp256k1_scalar_mul(&c2, &c2, &minus_b2); |
|||
secp256k1_scalar_add(r2, &c1, &c2); |
|||
secp256k1_scalar_mul(r1, r2, &minus_lambda); |
|||
secp256k1_scalar_add(r1, r1, a); |
|||
} |
|||
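/* Hedged sketch (added; the constant and helper below are not part of the
 * upstream file).  example_lambda is the lambda quoted in the comment above,
 * written as SECP256K1_SCALAR_CONST words; recombining the split halves as
 * r1 + lambda * r2 should reproduce the input scalar modulo the order. */
static const secp256k1_scalar_t example_lambda = SECP256K1_SCALAR_CONST(
    0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
    0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
);
static int example_check_split_lambda(const secp256k1_scalar_t *a) {
    secp256k1_scalar_t r1, r2, t;
    secp256k1_scalar_split_lambda(&r1, &r2, a);
    secp256k1_scalar_mul(&t, &r2, &example_lambda);
    secp256k1_scalar_add(&t, &t, &r1);
    return secp256k1_scalar_eq(&t, a); /* expected to be 1 */
}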
#endif |
|||
|
|||
#endif |
@ -0,0 +1,594 @@ |
|||
/**********************************************************************
|
|||
* Copyright (c) 2013-2015 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|||
**********************************************************************/ |
|||
|
|||
#define SECP256K1_BUILD (1) |
|||
|
|||
#include "secp256k1.h" |
|||
|
|||
#include "util.h" |
|||
#include "num_impl.h" |
|||
#include "field_impl.h" |
|||
#include "scalar_impl.h" |
|||
#include "group_impl.h" |
|||
#include "ecmult_impl.h" |
|||
#include "ecmult_const_impl.h" |
|||
#include "ecmult_gen_impl.h" |
|||
#include "ecdsa_impl.h" |
|||
#include "eckey_impl.h" |
|||
#include "hash_impl.h" |
|||
|
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
# include "modules/rangeproof/pedersen.h" |
|||
# include "modules/rangeproof/rangeproof.h" |
|||
#endif |
|||
|
|||
#define ARG_CHECK(cond) do { \ |
|||
if (EXPECT(!(cond), 0)) { \ |
|||
ctx->illegal_callback.fn(#cond, ctx->illegal_callback.data); \ |
|||
return 0; \ |
|||
} \ |
|||
} while(0) |
|||
|
|||
static void default_illegal_callback_fn(const char* str, void* data) { |
|||
(void)data; |
|||
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); |
|||
abort(); |
|||
} |
|||
|
|||
static const callback_t default_illegal_callback = { |
|||
default_illegal_callback_fn, |
|||
NULL |
|||
}; |
|||
|
|||
static void default_error_callback_fn(const char* str, void* data) { |
|||
(void)data; |
|||
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); |
|||
abort(); |
|||
} |
|||
|
|||
static const callback_t default_error_callback = { |
|||
default_error_callback_fn, |
|||
NULL |
|||
}; |
|||
|
|||
|
|||
struct secp256k1_context_struct { |
|||
secp256k1_ecmult_context_t ecmult_ctx; |
|||
secp256k1_ecmult_gen_context_t ecmult_gen_ctx; |
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
secp256k1_pedersen_context_t pedersen_ctx; |
|||
secp256k1_rangeproof_context_t rangeproof_ctx; |
|||
#endif |
|||
callback_t illegal_callback; |
|||
callback_t error_callback; |
|||
}; |
|||
|
|||
secp256k1_context_t* secp256k1_context_create(int flags) { |
|||
secp256k1_context_t* ret = (secp256k1_context_t*)checked_malloc(&default_error_callback, sizeof(secp256k1_context_t)); |
|||
ret->illegal_callback = default_illegal_callback; |
|||
ret->error_callback = default_error_callback; |
|||
|
|||
secp256k1_ecmult_context_init(&ret->ecmult_ctx); |
|||
secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); |
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
secp256k1_pedersen_context_init(&ret->pedersen_ctx); |
|||
secp256k1_rangeproof_context_init(&ret->rangeproof_ctx); |
|||
#endif |
|||
|
|||
if (flags & SECP256K1_CONTEXT_SIGN) { |
|||
secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback); |
|||
} |
|||
if (flags & SECP256K1_CONTEXT_VERIFY) { |
|||
secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback); |
|||
} |
|||
|
|||
return ret; |
|||
} |
|||
|
|||
secp256k1_context_t* secp256k1_context_clone(const secp256k1_context_t* ctx) { |
|||
secp256k1_context_t* ret = (secp256k1_context_t*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context_t)); |
|||
ret->illegal_callback = ctx->illegal_callback; |
|||
ret->error_callback = ctx->error_callback; |
|||
secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback); |
|||
secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback); |
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
secp256k1_pedersen_context_clone(&ret->pedersen_ctx, &ctx->pedersen_ctx, &ctx->error_callback); |
|||
secp256k1_rangeproof_context_clone(&ret->rangeproof_ctx, &ctx->rangeproof_ctx, &ctx->error_callback); |
|||
#endif |
|||
return ret; |
|||
} |
|||
|
|||
void secp256k1_context_destroy(secp256k1_context_t* ctx) { |
|||
secp256k1_ecmult_context_clear(&ctx->ecmult_ctx); |
|||
secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); |
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
secp256k1_pedersen_context_clear(&ctx->pedersen_ctx); |
|||
secp256k1_rangeproof_context_clear(&ctx->rangeproof_ctx); |
|||
#endif |
|||
|
|||
free(ctx); |
|||
} |
|||
|
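/* Hedged usage sketch (added; not part of the upstream file): a typical caller
 * creates one context up front with the flags it needs and destroys it at
 * shutdown.  The flag names are the ones tested in secp256k1_context_create
 * above; the helper name is made up for illustration. */
static void example_context_lifecycle(void) {
    secp256k1_context_t *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    /* ... call the signing/verification entry points below with ctx ... */
    secp256k1_context_destroy(ctx);
}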
|||
void secp256k1_context_set_illegal_callback(secp256k1_context_t* ctx, void (*fun)(const char* message, void* data), void* data) { |
|||
ctx->illegal_callback.fn = fun; |
|||
ctx->illegal_callback.data = data; |
|||
} |
|||
|
|||
void secp256k1_context_set_error_callback(secp256k1_context_t* ctx, void (*fun)(const char* message, void* data), void* data) { |
|||
ctx->error_callback.fn = fun; |
|||
ctx->error_callback.data = data; |
|||
} |
|||
|
|||
static int secp256k1_pubkey_load(const secp256k1_context_t* ctx, secp256k1_ge_t* ge, const secp256k1_pubkey_t* pubkey) { |
|||
if (sizeof(secp256k1_ge_storage_t) == 64) { |
|||
/* When the secp256k1_ge_storage_t type is exactly 64 byte, use its
|
|||
* representation inside secp256k1_pubkey_t, as conversion is very fast. |
|||
* Note that secp256k1_pubkey_save must use the same representation. */ |
|||
secp256k1_ge_storage_t s; |
|||
memcpy(&s, &pubkey->data[0], 64); |
|||
secp256k1_ge_from_storage(ge, &s); |
|||
} else { |
|||
/* Otherwise, fall back to 32-byte big endian for X and Y. */ |
|||
secp256k1_fe_t x, y; |
|||
secp256k1_fe_set_b32(&x, pubkey->data); |
|||
secp256k1_fe_set_b32(&y, pubkey->data + 32); |
|||
secp256k1_ge_set_xy(ge, &x, &y); |
|||
} |
|||
ARG_CHECK(!secp256k1_fe_is_zero(&ge->x)); |
|||
return 1; |
|||
} |
|||
|
|||
static void secp256k1_pubkey_save(secp256k1_pubkey_t* pubkey, secp256k1_ge_t* ge) { |
|||
if (sizeof(secp256k1_ge_storage_t) == 64) { |
|||
secp256k1_ge_storage_t s; |
|||
secp256k1_ge_to_storage(&s, ge); |
|||
memcpy(&pubkey->data[0], &s, 64); |
|||
} else { |
|||
VERIFY_CHECK(!secp256k1_ge_is_infinity(ge)); |
|||
secp256k1_fe_normalize_var(&ge->x); |
|||
secp256k1_fe_normalize_var(&ge->y); |
|||
secp256k1_fe_get_b32(pubkey->data, &ge->x); |
|||
secp256k1_fe_get_b32(pubkey->data + 32, &ge->y); |
|||
} |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_parse(const secp256k1_context_t* ctx, secp256k1_pubkey_t* pubkey, const unsigned char *input, int inputlen) { |
|||
secp256k1_ge_t Q; |
|||
|
|||
(void)ctx; |
|||
if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
return 0; |
|||
} |
|||
secp256k1_pubkey_save(pubkey, &Q); |
|||
secp256k1_ge_clear(&Q); |
|||
return 1; |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_serialize(const secp256k1_context_t* ctx, unsigned char *output, int *outputlen, const secp256k1_pubkey_t* pubkey, int compressed) { |
|||
secp256k1_ge_t Q; |
|||
|
|||
(void)ctx; |
|||
return (secp256k1_pubkey_load(ctx, &Q, pubkey) && |
|||
secp256k1_eckey_pubkey_serialize(&Q, output, outputlen, compressed)); |
|||
} |
|||
|
|||
static void secp256k1_ecdsa_signature_load(const secp256k1_context_t* ctx, secp256k1_scalar_t* r, secp256k1_scalar_t* s, int* recid, const secp256k1_ecdsa_signature_t* sig) { |
|||
(void)ctx; |
|||
if (sizeof(secp256k1_scalar_t) == 32) { |
|||
        /* When the secp256k1_scalar_t type is exactly 32 bytes, use its
|
|||
* representation inside secp256k1_ecdsa_signature_t, as conversion is very fast. |
|||
* Note that secp256k1_ecdsa_signature_save must use the same representation. */ |
|||
memcpy(r, &sig->data[0], 32); |
|||
memcpy(s, &sig->data[32], 32); |
|||
} else { |
|||
secp256k1_scalar_set_b32(r, &sig->data[0], NULL); |
|||
secp256k1_scalar_set_b32(s, &sig->data[32], NULL); |
|||
} |
|||
if (recid) { |
|||
*recid = sig->data[64]; |
|||
} |
|||
} |
|||
|
|||
static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature_t* sig, const secp256k1_scalar_t* r, const secp256k1_scalar_t* s, int recid) { |
|||
if (sizeof(secp256k1_scalar_t) == 32) { |
|||
memcpy(&sig->data[0], r, 32); |
|||
memcpy(&sig->data[32], s, 32); |
|||
} else { |
|||
secp256k1_scalar_get_b32(&sig->data[0], r); |
|||
secp256k1_scalar_get_b32(&sig->data[32], s); |
|||
} |
|||
sig->data[64] = recid; |
|||
} |
|||
|
|||
int secp256k1_ecdsa_signature_parse_der(const secp256k1_context_t* ctx, secp256k1_ecdsa_signature_t* sig, const unsigned char *input, int inputlen) { |
|||
secp256k1_scalar_t r, s; |
|||
|
|||
(void)ctx; |
|||
ARG_CHECK(sig != NULL); |
|||
ARG_CHECK(input != NULL); |
|||
|
|||
if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) { |
|||
secp256k1_ecdsa_signature_save(sig, &r, &s, -1); |
|||
return 1; |
|||
} else { |
|||
memset(sig, 0, sizeof(*sig)); |
|||
return 0; |
|||
} |
|||
} |
|||
|
|||
int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context_t* ctx, secp256k1_ecdsa_signature_t* sig, const unsigned char *input64, int recid) { |
|||
secp256k1_scalar_t r, s; |
|||
int ret = 1; |
|||
int overflow = 0; |
|||
|
|||
(void)ctx; |
|||
ARG_CHECK(sig != NULL); |
|||
ARG_CHECK(input64 != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&r, &input64[0], &overflow); |
|||
ret &= !overflow; |
|||
secp256k1_scalar_set_b32(&s, &input64[32], &overflow); |
|||
ret &= !overflow; |
|||
ret &= (recid == -1 || (recid >= 0 && recid < 4)); |
|||
if (ret) { |
|||
secp256k1_ecdsa_signature_save(sig, &r, &s, recid); |
|||
} else { |
|||
memset(sig, 0, sizeof(*sig)); |
|||
} |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context_t* ctx, unsigned char *output, int *outputlen, const secp256k1_ecdsa_signature_t* sig) { |
|||
secp256k1_scalar_t r, s; |
|||
|
|||
(void)ctx; |
|||
ARG_CHECK(output != NULL); |
|||
ARG_CHECK(outputlen != NULL); |
|||
ARG_CHECK(sig != NULL); |
|||
|
|||
secp256k1_ecdsa_signature_load(ctx, &r, &s, NULL, sig); |
|||
return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s); |
|||
} |
|||
|
|||
int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context_t* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_signature_t* sig) { |
|||
secp256k1_scalar_t r, s; |
|||
int rec; |
|||
|
|||
(void)ctx; |
|||
ARG_CHECK(output64 != NULL); |
|||
ARG_CHECK(sig != NULL); |
|||
|
|||
secp256k1_ecdsa_signature_load(ctx, &r, &s, &rec, sig); |
|||
secp256k1_scalar_get_b32(&output64[0], &r); |
|||
secp256k1_scalar_get_b32(&output64[32], &s); |
|||
if (recid) { |
|||
ARG_CHECK(rec >= 0 && rec < 4); |
|||
*recid = rec; |
|||
} |
|||
return 1; |
|||
} |
|||
|
|||
int secp256k1_ecdsa_verify(const secp256k1_context_t* ctx, const unsigned char *msg32, const secp256k1_ecdsa_signature_t *sig, const secp256k1_pubkey_t *pubkey) { |
|||
secp256k1_ge_t q; |
|||
secp256k1_scalar_t r, s; |
|||
secp256k1_scalar_t m; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(sig != NULL); |
|||
ARG_CHECK(pubkey != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&m, msg32, NULL); |
|||
secp256k1_ecdsa_signature_load(ctx, &r, &s, NULL, sig); |
|||
return (secp256k1_pubkey_load(ctx, &q, pubkey) && |
|||
secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); |
|||
} |
|||
|
|||
static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, unsigned int counter, const void *data) { |
|||
unsigned char keydata[112]; |
|||
int keylen = 64; |
|||
secp256k1_rfc6979_hmac_sha256_t rng; |
|||
unsigned int i; |
|||
/* We feed a byte array to the PRNG as input, consisting of:
|
|||
* - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. |
|||
* - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data. |
|||
* - optionally 16 extra bytes with the algorithm name (the extra data bytes |
|||
* are set to zeroes when not present, while the algorithm name is). |
|||
*/ |
|||
memcpy(keydata, key32, 32); |
|||
memcpy(keydata + 32, msg32, 32); |
|||
if (data != NULL) { |
|||
memcpy(keydata + 64, data, 32); |
|||
keylen = 96; |
|||
} |
|||
if (algo16 != NULL) { |
|||
memset(keydata + keylen, 0, 96 - keylen); |
|||
memcpy(keydata + 96, algo16, 16); |
|||
keylen = 112; |
|||
} |
|||
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, keylen); |
|||
memset(keydata, 0, sizeof(keydata)); |
|||
for (i = 0; i <= counter; i++) { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); |
|||
} |
|||
secp256k1_rfc6979_hmac_sha256_finalize(&rng); |
|||
return 1; |
|||
} |
|||
|
|||
const secp256k1_nonce_function_t secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979; |
|||
const secp256k1_nonce_function_t secp256k1_nonce_function_default = nonce_function_rfc6979; |
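
/* Illustrative sketch: the optional "additional data" input of RFC 6979 (section
 * 3.6) is supplied by passing a pointer to 32 bytes of extra entropy as the
 * noncedata argument of secp256k1_ecdsa_sign; passing NULL derives the nonce from
 * the key and message only. The wrapper name is hypothetical. */
static int example_sign_with_extra_entropy(const secp256k1_context_t* ctx,
                                           const unsigned char *msg32,
                                           const unsigned char *seckey,
                                           const unsigned char extra32[32],
                                           secp256k1_ecdsa_signature_t *sig) {
    /* nonce_function_rfc6979 reads exactly 32 bytes from the data pointer. */
    return secp256k1_ecdsa_sign(ctx, msg32, sig, seckey, secp256k1_nonce_function_rfc6979, extra32);
}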
|||
|
|||
int secp256k1_ecdsa_sign(const secp256k1_context_t* ctx, const unsigned char *msg32, secp256k1_ecdsa_signature_t *signature, const unsigned char *seckey, secp256k1_nonce_function_t noncefp, const void* noncedata) { |
|||
secp256k1_scalar_t r, s; |
|||
secp256k1_scalar_t sec, non, msg; |
|||
int recid; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
unsigned int count = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(signature != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
if (noncefp == NULL) { |
|||
noncefp = secp256k1_nonce_function_default; |
|||
} |
|||
|
|||
secp256k1_scalar_set_b32(&sec, seckey, &overflow); |
|||
/* Fail if the secret key is invalid. */ |
|||
if (!overflow && !secp256k1_scalar_is_zero(&sec)) { |
|||
secp256k1_scalar_set_b32(&msg, msg32, NULL); |
|||
while (1) { |
|||
unsigned char nonce32[32]; |
|||
ret = noncefp(nonce32, msg32, seckey, NULL, count, noncedata); |
|||
if (!ret) { |
|||
break; |
|||
} |
|||
secp256k1_scalar_set_b32(&non, nonce32, &overflow); |
|||
memset(nonce32, 0, 32); |
|||
if (!secp256k1_scalar_is_zero(&non) && !overflow) { |
|||
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { |
|||
break; |
|||
} |
|||
} |
|||
count++; |
|||
} |
|||
secp256k1_scalar_clear(&msg); |
|||
secp256k1_scalar_clear(&non); |
|||
secp256k1_scalar_clear(&sec); |
|||
} |
|||
if (ret) { |
|||
secp256k1_ecdsa_signature_save(signature, &r, &s, recid); |
|||
} else { |
|||
memset(signature, 0, sizeof(*signature)); |
|||
} |
|||
return ret; |
|||
} |
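
/* Illustrative sketch (not part of the library): an end-to-end ECDSA round trip
 * over the functions above. Returns 1 when seckey is valid and the produced
 * signature verifies. Assumes ctx was created with both SECP256K1_CONTEXT_SIGN
 * and SECP256K1_CONTEXT_VERIFY; the function name is hypothetical. */
static int example_ecdsa_roundtrip(const secp256k1_context_t* ctx,
                                   const unsigned char *msg32,
                                   const unsigned char *seckey) {
    secp256k1_ecdsa_signature_t sig;
    secp256k1_pubkey_t pub;
    if (!secp256k1_ec_pubkey_create(ctx, &pub, seckey)) {
        return 0; /* invalid secret key */
    }
    if (!secp256k1_ecdsa_sign(ctx, msg32, &sig, seckey, NULL, NULL)) {
        return 0; /* nonce generation failed */
    }
    return secp256k1_ecdsa_verify(ctx, msg32, &sig, &pub);
}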
|||
|
|||
int secp256k1_ecdsa_recover(const secp256k1_context_t* ctx, const unsigned char *msg32, const secp256k1_ecdsa_signature_t *signature, secp256k1_pubkey_t *pubkey) { |
|||
secp256k1_ge_t q; |
|||
secp256k1_scalar_t r, s; |
|||
secp256k1_scalar_t m; |
|||
int recid; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(msg32 != NULL); |
|||
ARG_CHECK(signature != NULL); |
|||
ARG_CHECK(pubkey != NULL); |
|||
|
|||
secp256k1_ecdsa_signature_load(ctx, &r, &s, &recid, signature); |
|||
ARG_CHECK(recid >= 0 && recid < 4); |
|||
secp256k1_scalar_set_b32(&m, msg32, NULL); |
|||
if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { |
|||
secp256k1_pubkey_save(pubkey, &q); |
|||
return 1; |
|||
} else { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
return 0; |
|||
} |
|||
} |
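
/* Illustrative sketch: serialize a recoverable signature to its 64-byte compact
 * form plus recovery id, parse it back, and recover the signer's public key. The
 * input signature must come from secp256k1_ecdsa_sign so that it carries a
 * recovery id, and ctx must be initialized for verification. The helper name is
 * hypothetical. */
static int example_recover_pubkey(const secp256k1_context_t* ctx,
                                  const unsigned char *msg32,
                                  const secp256k1_ecdsa_signature_t *sig,
                                  secp256k1_pubkey_t *recovered) {
    unsigned char compact[64];
    int recid;
    secp256k1_ecdsa_signature_t parsed;
    if (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact, &recid, sig)) {
        return 0;
    }
    if (!secp256k1_ecdsa_signature_parse_compact(ctx, &parsed, compact, recid)) {
        return 0;
    }
    return secp256k1_ecdsa_recover(ctx, msg32, &parsed, recovered);
}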
|||
|
|||
int secp256k1_ec_seckey_verify(const secp256k1_context_t* ctx, const unsigned char *seckey) { |
|||
secp256k1_scalar_t sec; |
|||
int ret; |
|||
int overflow; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
(void)ctx; |
|||
|
|||
secp256k1_scalar_set_b32(&sec, seckey, &overflow); |
|||
ret = !secp256k1_scalar_is_zero(&sec) && !overflow; |
|||
secp256k1_scalar_clear(&sec); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_create(const secp256k1_context_t* ctx, secp256k1_pubkey_t *pubkey, const unsigned char *seckey) { |
|||
secp256k1_gej_t pj; |
|||
secp256k1_ge_t p; |
|||
secp256k1_scalar_t sec; |
|||
int overflow; |
|||
int ret = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
ARG_CHECK(pubkey != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&sec, seckey, &overflow); |
|||
ret = !overflow & !secp256k1_scalar_is_zero(&sec); |
|||
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec); |
|||
secp256k1_ge_set_gej(&p, &pj); |
|||
secp256k1_pubkey_save(pubkey, &p); |
|||
secp256k1_scalar_clear(&sec); |
|||
if (!ret) { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
} |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_privkey_tweak_add(const secp256k1_context_t* ctx, unsigned char *seckey, const unsigned char *tweak) { |
|||
secp256k1_scalar_t term; |
|||
secp256k1_scalar_t sec; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
ARG_CHECK(tweak != NULL); |
|||
(void)ctx; |
|||
|
|||
secp256k1_scalar_set_b32(&term, tweak, &overflow); |
|||
secp256k1_scalar_set_b32(&sec, seckey, NULL); |
|||
|
|||
ret = secp256k1_eckey_privkey_tweak_add(&sec, &term) && !overflow; |
|||
if (ret) { |
|||
secp256k1_scalar_get_b32(seckey, &sec); |
|||
} |
|||
|
|||
secp256k1_scalar_clear(&sec); |
|||
secp256k1_scalar_clear(&term); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_tweak_add(const secp256k1_context_t* ctx, secp256k1_pubkey_t *pubkey, const unsigned char *tweak) { |
|||
secp256k1_ge_t p; |
|||
secp256k1_scalar_t term; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(pubkey != NULL); |
|||
ARG_CHECK(tweak != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&term, tweak, &overflow); |
|||
if (!overflow && secp256k1_pubkey_load(ctx, &p, pubkey)) { |
|||
ret = secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term); |
|||
if (ret) { |
|||
secp256k1_pubkey_save(pubkey, &p); |
|||
} else { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
} |
|||
} |
|||
|
|||
return ret; |
|||
} |
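
/* Illustrative sketch: applying the same tweak to a private key and to the
 * matching public key keeps the pair consistent, since (seckey + tweak)*G equals
 * pubkey + tweak*G. This is the pattern used by BIP32-style derivation. Assumes
 * ctx was initialized for verification (needed by the pubkey variant); the
 * function name is hypothetical. */
static int example_tweak_add_consistent(const secp256k1_context_t* ctx,
                                        unsigned char *seckey32,
                                        secp256k1_pubkey_t *pubkey,
                                        const unsigned char *tweak32) {
    if (!secp256k1_ec_privkey_tweak_add(ctx, seckey32, tweak32)) {
        return 0; /* tweak out of range or resulting key invalid */
    }
    return secp256k1_ec_pubkey_tweak_add(ctx, pubkey, tweak32);
}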
|||
|
|||
int secp256k1_ec_privkey_tweak_mul(const secp256k1_context_t* ctx, unsigned char *seckey, const unsigned char *tweak) { |
|||
secp256k1_scalar_t factor; |
|||
secp256k1_scalar_t sec; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(seckey != NULL); |
|||
ARG_CHECK(tweak != NULL); |
|||
(void)ctx; |
|||
|
|||
secp256k1_scalar_set_b32(&factor, tweak, &overflow); |
|||
secp256k1_scalar_set_b32(&sec, seckey, NULL); |
|||
ret = secp256k1_eckey_privkey_tweak_mul(&sec, &factor) && !overflow; |
|||
if (ret) { |
|||
secp256k1_scalar_get_b32(seckey, &sec); |
|||
} |
|||
|
|||
secp256k1_scalar_clear(&sec); |
|||
secp256k1_scalar_clear(&factor); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context_t* ctx, secp256k1_pubkey_t *pubkey, const unsigned char *tweak) { |
|||
secp256k1_ge_t p; |
|||
secp256k1_scalar_t factor; |
|||
int ret = 0; |
|||
int overflow = 0; |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); |
|||
ARG_CHECK(pubkey != NULL); |
|||
ARG_CHECK(tweak != NULL); |
|||
|
|||
secp256k1_scalar_set_b32(&factor, tweak, &overflow); |
|||
if (!overflow && secp256k1_pubkey_load(ctx, &p, pubkey)) { |
|||
ret = secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor); |
|||
if (ret) { |
|||
secp256k1_pubkey_save(pubkey, &p); |
|||
} else { |
|||
memset(pubkey, 0, sizeof(*pubkey)); |
|||
} |
|||
} |
|||
|
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_privkey_export(const secp256k1_context_t* ctx, const unsigned char *seckey, unsigned char *privkey, int *privkeylen, int compressed) { |
|||
secp256k1_scalar_t key; |
|||
int ret = 0; |
|||
ARG_CHECK(seckey != NULL); |
|||
ARG_CHECK(privkey != NULL); |
|||
ARG_CHECK(privkeylen != NULL); |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
|
|||
secp256k1_scalar_set_b32(&key, seckey, NULL); |
|||
ret = secp256k1_eckey_privkey_serialize(&ctx->ecmult_gen_ctx, privkey, privkeylen, &key, compressed); |
|||
secp256k1_scalar_clear(&key); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_ec_privkey_import(const secp256k1_context_t* ctx, unsigned char *seckey, const unsigned char *privkey, int privkeylen) { |
|||
secp256k1_scalar_t key; |
|||
int ret = 0; |
|||
ARG_CHECK(seckey != NULL); |
|||
ARG_CHECK(privkey != NULL); |
|||
(void)ctx; |
|||
|
|||
ret = secp256k1_eckey_privkey_parse(&key, privkey, privkeylen); |
|||
if (ret) { |
|||
secp256k1_scalar_get_b32(seckey, &key); |
|||
} |
|||
secp256k1_scalar_clear(&key); |
|||
return ret; |
|||
} |
|||
|
|||
int secp256k1_context_randomize(secp256k1_context_t* ctx, const unsigned char *seed32) { |
|||
ARG_CHECK(ctx != NULL); |
|||
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); |
|||
secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); |
|||
return 1; |
|||
} |
|||
|
|||
int secp256k1_ec_pubkey_combine(const secp256k1_context_t* ctx, secp256k1_pubkey_t *pubnonce, int n, const secp256k1_pubkey_t * const *pubnonces) { |
|||
int i; |
|||
secp256k1_gej_t Qj; |
|||
secp256k1_ge_t Q; |
|||
|
|||
ARG_CHECK(pubnonce != NULL); |
|||
ARG_CHECK(n >= 1); |
|||
ARG_CHECK(pubnonces != NULL); |
|||
|
|||
secp256k1_gej_set_infinity(&Qj); |
|||
|
|||
for (i = 0; i < n; i++) { |
|||
secp256k1_pubkey_load(ctx, &Q, pubnonces[i]); |
|||
secp256k1_gej_add_ge(&Qj, &Qj, &Q); |
|||
} |
|||
if (secp256k1_gej_is_infinity(&Qj)) { |
|||
memset(pubnonce, 0, sizeof(*pubnonce)); |
|||
return 0; |
|||
} |
|||
secp256k1_ge_set_gej(&Q, &Qj); |
|||
secp256k1_pubkey_save(pubnonce, &Q); |
|||
return 1; |
|||
} |
|||
|
|||
#ifdef ENABLE_MODULE_ECDH |
|||
# include "modules/ecdh/main_impl.h" |
|||
#endif |
|||
|
|||
#ifdef ENABLE_MODULE_SCHNORR |
|||
# include "modules/schnorr/main_impl.h" |
|||
#endif |
|||
|
|||
#ifdef ENABLE_MODULE_RANGEPROOF |
|||
# include "modules/rangeproof/main_impl.h" |
|||
#endif |
@ -0,0 +1,453 @@ |
|||
#ifndef _SECP256K1_ |
|||
# define _SECP256K1_ |
|||
|
|||
# ifdef __cplusplus |
|||
extern "C" { |
|||
# endif |
|||
|
|||
# if !defined(SECP256K1_GNUC_PREREQ) |
|||
# if defined(__GNUC__)&&defined(__GNUC_MINOR__) |
|||
# define SECP256K1_GNUC_PREREQ(_maj,_min) \ |
|||
((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min)) |
|||
# else |
|||
# define SECP256K1_GNUC_PREREQ(_maj,_min) 0 |
|||
# endif |
|||
# endif |
|||
|
|||
# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) |
|||
# if SECP256K1_GNUC_PREREQ(2,7) |
|||
# define SECP256K1_INLINE __inline__ |
|||
# elif (defined(_MSC_VER)) |
|||
# define SECP256K1_INLINE __inline |
|||
# else |
|||
# define SECP256K1_INLINE |
|||
# endif |
|||
# else |
|||
# define SECP256K1_INLINE inline |
|||
# endif |
|||
|
|||
/** Warning attributes
|
|||
* NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out |
|||
* some paranoid null checks. */ |
|||
# if defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) |
|||
# define SECP256K1_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) |
|||
# else |
|||
# define SECP256K1_WARN_UNUSED_RESULT |
|||
# endif |
|||
# if !defined(SECP256K1_BUILD) && defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) |
|||
# define SECP256K1_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x))) |
|||
# else |
|||
# define SECP256K1_ARG_NONNULL(_x) |
|||
# endif |
|||
|
|||
/** Opaque data structure that holds context information (precomputed tables etc.).
|
|||
* Only functions that take a pointer to a non-const context require exclusive |
|||
* access to it. Multiple functions that take a pointer to a const context may |
|||
* run simultaneously. |
|||
*/ |
|||
typedef struct secp256k1_context_struct secp256k1_context_t; |
|||
|
|||
/** Flags to pass to secp256k1_context_create. */ |
|||
# define SECP256K1_CONTEXT_VERIFY (1 << 0) |
|||
# define SECP256K1_CONTEXT_SIGN (1 << 1) |
|||
|
|||
/** Create a secp256k1 context object.
|
|||
* Returns: a newly created context object. |
|||
* In: flags: which parts of the context to initialize. |
|||
*/ |
|||
secp256k1_context_t* secp256k1_context_create( |
|||
int flags |
|||
) SECP256K1_WARN_UNUSED_RESULT; |
|||
|
|||
/** Copies a secp256k1 context object.
|
|||
* Returns: a newly created context object. |
|||
* In: ctx: an existing context to copy |
|||
*/ |
|||
secp256k1_context_t* secp256k1_context_clone( |
|||
const secp256k1_context_t* ctx |
|||
) SECP256K1_WARN_UNUSED_RESULT; |
|||
|
|||
/** Destroy a secp256k1 context object.
|
|||
* The context pointer may not be used afterwards. |
|||
*/ |
|||
void secp256k1_context_destroy( |
|||
secp256k1_context_t* ctx |
|||
) SECP256K1_ARG_NONNULL(1); |
|||
|
|||
/** Set a callback function to be called when an illegal argument is passed to
|
|||
* an API call. The philosophy is that these shouldn't be dealt with through a |
|||
* specific return value, as calling code should not have branches to deal with |
|||
* the case that this code itself is broken. |
|||
* On the other hand, during debug stage, one would want to be informed about |
|||
* such mistakes, and the default (crashing) may be inadvisable. |
|||
* When this callback is triggered, the API function called is guaranteed not |
|||
* to cause a crash, though its return value and output arguments are |
|||
* undefined. |
|||
*/ |
|||
void secp256k1_context_set_illegal_callback( |
|||
secp256k1_context_t* ctx, |
|||
void (*fun)(const char* message, void* data), |
|||
void* data |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); |
|||
|
|||
/** Set a callback function to be called when an internal consistency check
|
|||
* fails. The default is crashing. |
|||
* This can only trigger in case of a hardware failure, miscompilation, |
|||
 * memory corruption, serious bug in the library, or other error that could |
|||
* otherwise result in undefined behaviour. It will not trigger due to mere |
|||
* incorrect usage of the API (see secp256k1_context_set_illegal_callback |
|||
* for that). After this callback returns, anything may happen, including |
|||
* crashing. |
|||
*/ |
|||
void secp256k1_context_set_error_callback( |
|||
secp256k1_context_t* ctx, |
|||
void (*fun)(const char* message, void* data), |
|||
void* data |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); |
|||
|
|||
/** Data type to hold a parsed and valid public key.
|
|||
This data type should be considered opaque to the user, and only created |
|||
through API functions. It is not guaranteed to be compatible between |
|||
different implementations. If you need to convert to a format suitable |
|||
for storage or transmission, use secp256k1_ec_pubkey_serialize and |
|||
secp256k1_ec_pubkey_parse. |
|||
*/ |
|||
typedef struct { |
|||
unsigned char data[64]; |
|||
} secp256k1_pubkey_t; |
|||
|
|||
/** Parse a variable-length public key into the pubkey object.
|
|||
* Returns: 1 if the public key was fully valid. |
|||
* 0 if the public key could not be parsed or is invalid. |
|||
* In: ctx: a secp256k1 context object. |
|||
* input: pointer to a serialized public key |
|||
* inputlen: length of the array pointed to by input |
|||
* Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to a |
|||
* parsed version of input. If not, its value is undefined. |
|||
* This function supports parsing compressed (33 bytes, header byte 0x02 or |
|||
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header |
|||
* byte 0x06 or 0x07) format public keys. |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_pubkey_t* pubkey, |
|||
const unsigned char *input, |
|||
int inputlen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Serialize a pubkey object into a serialized byte sequence.
|
|||
* Returns: 1 always. |
|||
* In: ctx: a secp256k1 context object. |
|||
* pubkey: a pointer to a secp256k1_pubkey_t containing an initialized |
|||
* public key. |
|||
* compressed: whether to serialize in compressed format. |
|||
* Out: output: a pointer to a 65-byte (if compressed==0) or 33-byte (if |
|||
* compressed==1) byte array to place the serialized key in. |
|||
* outputlen: a pointer to an integer which will contain the serialized |
|||
* size. |
|||
*/ |
|||
int secp256k1_ec_pubkey_serialize( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *output, |
|||
int *outputlen, |
|||
const secp256k1_pubkey_t* pubkey, |
|||
int compressed |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
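
/* Illustrative usage sketch (not part of the API): re-encode a serialized public
 * key of any supported format (compressed, uncompressed or hybrid) into 33-byte
 * compressed form. The helper name is hypothetical; buffers are caller-provided
 * and outputlen receives the number of bytes written. */
static int example_pubkey_to_compressed(
    const secp256k1_context_t* ctx,
    const unsigned char *input,
    int inputlen,
    unsigned char *output33,
    int *outputlen
) {
    secp256k1_pubkey_t pub;
    if (!secp256k1_ec_pubkey_parse(ctx, &pub, input, inputlen)) {
        return 0; /* not a valid public key */
    }
    return secp256k1_ec_pubkey_serialize(ctx, output33, outputlen, &pub, 1 /* compressed */);
}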
|||
|
|||
/** Data type to hold a parsed ECDSA signature, optionally supporting pubkey
|
|||
* recovery. |
|||
This data type should be considered opaque to the user, and only created |
|||
through API functions. It is not guaranteed to be compatible between |
|||
different implementations. If you need to convert to a format suitable |
|||
for storage or transmission, use secp256k1_ecdsa_signature_serialize_* and |
|||
secp256k1_ecdsa_signature_parse_* functions. */ |
|||
typedef struct { |
|||
unsigned char data[65]; |
|||
} secp256k1_ecdsa_signature_t; |
|||
|
|||
/** Parse a DER ECDSA signature.
|
|||
* Returns: 1 when the signature could be parsed, 0 otherwise. |
|||
* In: ctx: a secp256k1 context object |
|||
* input: a pointer to the signature to be parsed |
|||
 * inputlen: the length of the array pointed to by input |
|||
* Out: sig: a pointer to a signature object |
|||
* |
|||
* Note that this function also supports some violations of DER. |
|||
* |
|||
* The resulting signature object will not support pubkey recovery. |
|||
*/ |
|||
int secp256k1_ecdsa_signature_parse_der( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_ecdsa_signature_t* sig, |
|||
const unsigned char *input, |
|||
int inputlen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Parse a compact ECDSA signature (64 bytes + recovery id).
|
|||
* Returns: 1 when the signature could be parsed, 0 otherwise |
|||
* In: ctx: a secp256k1 context object |
|||
* input64: a pointer to a 64-byte compact signature |
|||
* recid: the recovery id (0, 1, 2 or 3, or -1 for unknown) |
|||
* Out: sig: a pointer to a signature object |
|||
* |
|||
* If recid is not -1, the resulting signature object will support pubkey |
|||
* recovery. |
|||
*/ |
|||
int secp256k1_ecdsa_signature_parse_compact( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_ecdsa_signature_t* sig, |
|||
const unsigned char *input64, |
|||
int recid |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Serialize an ECDSA signature in DER format.
|
|||
* Returns: 1 if enough space was available to serialize, 0 otherwise |
|||
* In: ctx: a secp256k1 context object |
|||
* sig: a pointer to an initialized signature object |
|||
* Out: output: a pointer to an array to store the DER serialization |
|||
* In/Out: outputlen: a pointer to a length integer. Initially, this integer |
|||
* should be set to the length of output. After the call |
|||
* it will be set to the length of the serialization (even |
|||
* if 0 was returned). |
|||
*/ |
|||
int secp256k1_ecdsa_signature_serialize_der( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *output, |
|||
int *outputlen, |
|||
const secp256k1_ecdsa_signature_t* sig |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
|
|||
* Returns: 1 |
|||
* In: ctx: a secp256k1 context object |
|||
* sig: a pointer to an initialized signature object (cannot be NULL) |
|||
* Out: output64: a pointer to a 64-byte array of the compact signature (cannot be NULL) |
|||
* recid: a pointer to an integer to hold the recovery id (can be NULL). |
|||
* |
|||
* If recid is not NULL, the signature must support pubkey recovery. |
|||
*/ |
|||
int secp256k1_ecdsa_signature_serialize_compact( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *output64, |
|||
int *recid, |
|||
const secp256k1_ecdsa_signature_t* sig |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Verify an ECDSA signature.
|
|||
* Returns: 1: correct signature |
|||
* 0: incorrect or unparseable signature |
|||
* In: ctx: a secp256k1 context object, initialized for verification. |
|||
* msg32: the 32-byte message hash being verified (cannot be NULL) |
|||
* sig: the signature being verified (cannot be NULL) |
|||
* pubkey: pointer to an initialized public key to verify with (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
const secp256k1_ecdsa_signature_t *sig, |
|||
const secp256k1_pubkey_t *pubkey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** A pointer to a function to deterministically generate a nonce.
|
|||
* Returns: 1 if a nonce was successfully generated. 0 will cause signing to fail. |
|||
 * In: msg32: the 32-byte message hash being signed (will not be NULL) |
|||
* key32: pointer to a 32-byte secret key (will not be NULL) |
|||
* algo16: pointer to a 16-byte array describing the signature |
|||
* algorithm (will be NULL for ECDSA for compatibility). |
|||
* attempt: how many iterations we have tried to find a nonce. |
|||
* This will almost always be 0, but different attempt values |
|||
* are required to result in a different nonce. |
|||
* data: Arbitrary data pointer that is passed through. |
|||
* Out: nonce32: pointer to a 32-byte array to be filled by the function. |
|||
* Except for test cases, this function should compute some cryptographic hash of |
|||
* the message, the key and the attempt. |
|||
*/ |
|||
typedef int (*secp256k1_nonce_function_t)( |
|||
unsigned char *nonce32, |
|||
const unsigned char *msg32, |
|||
const unsigned char *key32, |
|||
const unsigned char *algo16, |
|||
unsigned int attempt, |
|||
const void *data |
|||
); |
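
/* Illustrative sketch of the calling convention above: a test-only generator that
 * returns a caller-supplied 32-byte value passed through the data pointer and
 * refuses retries. The name is hypothetical. Never use a fixed nonce for real
 * signing; reusing a nonce across messages leaks the private key. */
static int example_fixed_nonce_function(unsigned char *nonce32,
                                        const unsigned char *msg32,
                                        const unsigned char *key32,
                                        const unsigned char *algo16,
                                        unsigned int attempt,
                                        const void *data) {
    int i;
    (void)msg32; (void)key32; (void)algo16;
    if (data == NULL || attempt > 0) {
        return 0; /* fail signing rather than repeat or invent a nonce */
    }
    for (i = 0; i < 32; i++) {
        nonce32[i] = ((const unsigned char*)data)[i];
    }
    return 1;
}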
|||
|
|||
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
|
|||
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of |
|||
* extra entropy. |
|||
*/ |
|||
extern const secp256k1_nonce_function_t secp256k1_nonce_function_rfc6979; |
|||
|
|||
/** A default safe nonce generation function (currently equal to secp256k1_nonce_function_rfc6979). */ |
|||
extern const secp256k1_nonce_function_t secp256k1_nonce_function_default; |
|||
|
|||
/** Create an ECDSA signature.
|
|||
* Returns: 1: signature created |
|||
* 0: the nonce generation function failed, or the private key was invalid. |
|||
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL) |
|||
* msg32: the 32-byte message hash being signed (cannot be NULL) |
|||
* seckey: pointer to a 32-byte secret key (cannot be NULL) |
|||
* noncefp:pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used |
|||
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) |
|||
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL) |
|||
* |
|||
* The resulting signature will support pubkey recovery. |
|||
* |
|||
* The sig always has an s value in the lower half of the range (From 0x1 |
|||
* to 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0, |
|||
* inclusive), unlike many other implementations. |
|||
 * With ECDSA a third-party can forge a second distinct signature |
|||
* of the same message given a single initial signature without knowing |
|||
* the key by setting s to its additive inverse mod-order, 'flipping' the |
|||
* sign of the random point R which is not included in the signature. |
|||
* Since the forgery is of the same message this isn't universally |
|||
* problematic, but in systems where message malleability or uniqueness |
|||
* of signatures is important this can cause issues. This forgery can be |
|||
* blocked by all verifiers forcing signers to use a canonical form. The |
|||
* lower-S form reduces the size of signatures slightly on average when |
|||
* variable length encodings (such as DER) are used and is cheap to |
|||
* verify, making it a good choice. Security of always using lower-S is |
|||
* assured because anyone can trivially modify a signature after the |
|||
* fact to enforce this property. Adjusting it inside the signing |
|||
* function avoids the need to re-serialize or have curve specific |
|||
* constants outside of the library. By always using a canonical form |
|||
* even in applications where it isn't needed it becomes possible to |
|||
* impose a requirement later if a need is discovered. |
|||
* No other forms of ECDSA malleability are known and none seem likely, |
|||
* but there is no formal proof that ECDSA, even with this additional |
|||
* restriction, is free of other malleability. Commonly used serialization |
|||
* schemes will also accept various non-unique encodings, so care should |
|||
* be taken when this property is required for an application. |
|||
*/ |
|||
int secp256k1_ecdsa_sign( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
secp256k1_ecdsa_signature_t *sig, |
|||
const unsigned char *seckey, |
|||
secp256k1_nonce_function_t noncefp, |
|||
const void *ndata |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Recover an ECDSA public key from a signature.
|
|||
* Returns: 1: public key successfully recovered (which guarantees a correct signature). |
|||
* 0: otherwise. |
|||
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL) |
|||
* msg32: the 32-byte message hash assumed to be signed (cannot be NULL) |
|||
* sig64: pointer to initialized signature that supports pubkey recovery (cannot be NULL) |
|||
 * Out: pubkey: pointer to the recovered public key (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_recover( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
const secp256k1_ecdsa_signature_t *sig, |
|||
secp256k1_pubkey_t *pubkey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Verify an ECDSA secret key.
|
|||
* Returns: 1: secret key is valid |
|||
* 0: secret key is invalid |
|||
* In: ctx: pointer to a context object (cannot be NULL) |
|||
* seckey: pointer to a 32-byte secret key (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *seckey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); |
|||
|
|||
/** Compute the public key for a secret key.
|
|||
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL) |
|||
* seckey: pointer to a 32-byte private key (cannot be NULL) |
|||
* Out: pubkey: pointer to the created public key (cannot be NULL) |
|||
 * Returns: 1: secret was valid, public key stored |
|||
* 0: secret was invalid, try again |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_pubkey_t *pubkey, |
|||
const unsigned char *seckey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Export a private key in DER format.
|
|||
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_export( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *seckey, |
|||
unsigned char *privkey, |
|||
int *privkeylen, |
|||
int compressed |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Import a private key in DER format. */ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_import( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *seckey, |
|||
const unsigned char *privkey, |
|||
int privkeylen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Tweak a private key by adding tweak to it. */ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *seckey, |
|||
const unsigned char *tweak |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Tweak a public key by adding tweak times the generator to it.
|
|||
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_pubkey_t *pubkey, |
|||
const unsigned char *tweak |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Tweak a private key by multiplying it with tweak. */ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *seckey, |
|||
const unsigned char *tweak |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Tweak a public key by multiplying it with tweak.
|
|||
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_pubkey_t *pubkey, |
|||
const unsigned char *tweak |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Updates the context randomization.
|
|||
* Returns: 1: randomization successfully updated |
|||
* 0: error |
|||
* In: ctx: pointer to a context object (cannot be NULL) |
|||
* seed32: pointer to a 32-byte random seed (NULL resets to initial state) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize( |
|||
secp256k1_context_t* ctx, |
|||
const unsigned char *seed32 |
|||
) SECP256K1_ARG_NONNULL(1); |
|||
|
|||
/** Add a number of public keys together.
|
|||
* Returns: 1: the sum of the public keys is valid. |
|||
* 0: the sum of the public keys is not valid. |
|||
* In: ctx: pointer to a context object |
|||
* out: pointer to pubkey for placing the resulting public key |
|||
* (cannot be NULL) |
|||
* n: the number of public keys to add together (must be at least 1) |
|||
* ins: pointer to array of pointers to public keys (cannot be NULL) |
|||
* Use secp256k1_ec_pubkey_compress and secp256k1_ec_pubkey_decompress if the |
|||
* uncompressed format is needed. |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine( |
|||
const secp256k1_context_t* ctx, |
|||
secp256k1_pubkey_t *out, |
|||
int n, |
|||
const secp256k1_pubkey_t * const * ins |
|||
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
# ifdef __cplusplus |
|||
} |
|||
# endif |
|||
|
|||
#endif |
@ -0,0 +1,30 @@ |
|||
#ifndef _SECP256K1_ECDH_ |
|||
# define _SECP256K1_ECDH_ |
|||
|
|||
# include "secp256k1.h" |
|||
|
|||
# ifdef __cplusplus |
|||
extern "C" { |
|||
# endif |
|||
|
|||
/** Compute an EC Diffie-Hellman secret in constant time
|
|||
* Returns: 1: exponentiation was successful |
|||
* 0: scalar was invalid (zero or overflow) |
|||
* In: ctx: pointer to a context object (cannot be NULL) |
|||
* point: pointer to a public point |
|||
* scalar: a 32-byte scalar with which to multiply the point |
|||
* Out: result: a 32-byte array which will be populated by an ECDH |
|||
* secret computed from the point and scalar |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdh( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *result, |
|||
const secp256k1_pubkey_t *point, |
|||
const unsigned char *scalar |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
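
/* Illustrative sketch: both ends of a key exchange derive the same 32-byte secret
 * by combining their own private scalar with the peer's public point. The helper
 * name is hypothetical; in practice each side only computes its own view. */
static int example_ecdh_agree(const secp256k1_context_t* ctx,
                              const unsigned char *seckey_a,
                              const secp256k1_pubkey_t *pubkey_a,
                              const unsigned char *seckey_b,
                              const secp256k1_pubkey_t *pubkey_b) {
    unsigned char secret_ab[32];
    unsigned char secret_ba[32];
    int i;
    if (!secp256k1_ecdh(ctx, secret_ab, pubkey_b, seckey_a)) return 0; /* A's view */
    if (!secp256k1_ecdh(ctx, secret_ba, pubkey_a, seckey_b)) return 0; /* B's view */
    for (i = 0; i < 32; i++) {
        if (secret_ab[i] != secret_ba[i]) return 0;
    }
    return 1; /* both sides computed the same shared secret */
}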
|||
|
|||
# ifdef __cplusplus |
|||
} |
|||
# endif |
|||
|
|||
#endif |
@ -0,0 +1,186 @@ |
|||
#ifndef _SECP256K1_RANGEPROOF_ |
|||
# define _SECP256K1_RANGEPROOF_ |
|||
|
|||
# include "secp256k1.h" |
|||
|
|||
# ifdef __cplusplus |
|||
extern "C" { |
|||
# endif |
|||
|
|||
#include <stdint.h> |
|||
|
|||
/** Initialize a context for usage with Pedersen commitments. */ |
|||
int secp256k1_pedersen_context_initialize(secp256k1_context_t* ctx); |
|||
|
|||
/** Generate a pedersen commitment.
|
|||
* Returns 1: commitment successfully created. |
|||
* 0: error |
|||
* In: ctx: pointer to a context object, initialized for signing and Pedersen commitment (cannot be NULL) |
|||
* blind: pointer to a 32-byte blinding factor (cannot be NULL) |
|||
* value: unsigned 64-bit integer value to commit to. |
|||
* Out: commit: pointer to a 33-byte array for the commitment (cannot be NULL) |
|||
* |
|||
* Blinding factors can be generated and verified in the same way as secp256k1 private keys for ECDSA. |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_commit( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *commit, |
|||
unsigned char *blind, |
|||
uint64_t value |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Computes the sum of multiple positive and negative blinding factors.
|
|||
* Returns 1: sum successfully computed. |
|||
* 0: error |
|||
* In: ctx: pointer to a context object (cannot be NULL) |
|||
* blinds: pointer to pointers to 32-byte character arrays for blinding factors. (cannot be NULL) |
|||
* n: number of factors pointed to by blinds. |
|||
* nneg: how many of the initial factors should be treated with a positive sign. |
|||
* Out: blind_out: pointer to a 32-byte array for the sum (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_blind_sum( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *blind_out, |
|||
const unsigned char * const *blinds, |
|||
int n, |
|||
int npositive |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); |
|||
|
|||
/** Verify a tally of pedersen commitments
|
|||
* Returns 1: commitments successfully sum to zero. |
|||
* 0: Commitments do not sum to zero or other error. |
|||
* In: ctx: pointer to a context object, initialized for Pedersen commitment (cannot be NULL) |
|||
* commits: pointer to pointers to 33-byte character arrays for the commitments. (cannot be NULL if pcnt is non-zero) |
|||
* pcnt: number of commitments pointed to by commits. |
|||
* ncommits: pointer to pointers to 33-byte character arrays for negative commitments. (cannot be NULL if ncnt is non-zero) |
|||
* ncnt: number of commitments pointed to by ncommits. |
|||
* excess: signed 64bit amount to add to the total to bring it to zero, can be negative. |
|||
* |
|||
* This computes sum(commit[0..pcnt)) - sum(ncommit[0..ncnt)) - excess*H == 0. |
|||
* |
|||
* A pedersen commitment is xG + vH where G and H are generators for the secp256k1 group and x is a blinding factor, |
|||
* while v is the committed value. For a collection of commitments to sum to zero both their blinding factors and |
|||
* values must sum to zero. |
|||
* |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_verify_tally( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char * const *commits, |
|||
int pcnt, |
|||
const unsigned char * const *ncommits, |
|||
int ncnt, |
|||
int64_t excess |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); |
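
/* Illustrative sketch of the balance check described above: one input commitment
 * is split into two outputs carrying the same total value. The second output's
 * blinding factor is derived with secp256k1_pedersen_blind_sum so that all
 * blinding factors cancel, making the tally sum to zero. Assumes value_out1 <=
 * value_in and that ctx was created for signing and prepared with
 * secp256k1_pedersen_context_initialize; the function name is hypothetical. */
static int example_pedersen_balance(const secp256k1_context_t* ctx,
                                    unsigned char blind_in[32],
                                    unsigned char blind_out1[32],
                                    uint64_t value_in,
                                    uint64_t value_out1) {
    unsigned char blind_out2[32];
    unsigned char commit_in[33];
    unsigned char commit_out1[33];
    unsigned char commit_out2[33];
    const unsigned char *blinds[2];
    const unsigned char *pos[1];
    const unsigned char *neg[2];
    uint64_t value_out2 = value_in - value_out1;

    /* blind_out2 = blind_in - blind_out1 (one positive term, one negative). */
    blinds[0] = blind_in;
    blinds[1] = blind_out1;
    if (!secp256k1_pedersen_blind_sum(ctx, blind_out2, blinds, 2, 1)) return 0;

    if (!secp256k1_pedersen_commit(ctx, commit_in, blind_in, value_in)) return 0;
    if (!secp256k1_pedersen_commit(ctx, commit_out1, blind_out1, value_out1)) return 0;
    if (!secp256k1_pedersen_commit(ctx, commit_out2, blind_out2, value_out2)) return 0;

    /* Checks commit_in - commit_out1 - commit_out2 - 0*H == 0. */
    pos[0] = commit_in;
    neg[0] = commit_out1;
    neg[1] = commit_out2;
    return secp256k1_pedersen_verify_tally(ctx, pos, 1, neg, 2, 0);
}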
|||
|
|||
/** Initialize a context for usage with range proofs. */ |
|||
int secp256k1_rangeproof_context_initialize(secp256k1_context_t* ctx); |
|||
|
|||
/** Verify a proof that a committed value is within a range.
|
|||
* Returns 1: Value is within the range [0..2^64), the specifically proven range is in the min/max value outputs. |
|||
* 0: Proof failed or other error. |
|||
* In: ctx: pointer to a context object, initialized for range-proof and commitment (cannot be NULL) |
|||
* commit: the 33-byte commitment being proved. (cannot be NULL) |
|||
* proof: pointer to character array with the proof. (cannot be NULL) |
|||
* plen: length of proof in bytes. |
|||
 * Out: min_value: pointer to an unsigned int64 which will be updated with the minimum value that commit could have. (cannot be NULL) |
|||
 * max_value: pointer to an unsigned int64 which will be updated with the maximum value that commit could have. (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_rangeproof_verify( |
|||
const secp256k1_context_t* ctx, |
|||
uint64_t *min_value, |
|||
uint64_t *max_value, |
|||
const unsigned char *commit, |
|||
const unsigned char *proof, |
|||
int plen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); |
|||
|
|||
/** Verify a range proof and rewind it to recover information sent by its author.
|
|||
* Returns 1: Value is within the range [0..2^64), the specifically proven range is in the min/max value outputs, and the value and blinding were recovered. |
|||
* 0: Proof failed, rewind failed, or other error. |
|||
* In: ctx: pointer to a context object, initialized for range-proof and Pedersen commitment (cannot be NULL) |
|||
* commit: the 33-byte commitment being proved. (cannot be NULL) |
|||
* proof: pointer to character array with the proof. (cannot be NULL) |
|||
* plen: length of proof in bytes. |
|||
* nonce: 32-byte secret nonce used by the prover (cannot be NULL) |
|||
* In/Out: blind_out: storage for the 32-byte blinding factor used for the commitment |
|||
* value_out: pointer to an unsigned int64 which has the exact value of the commitment. |
|||
* message_out: pointer to a 4096 byte character array to receive message data from the proof author. |
|||
* outlen: length of message data written to message_out. |
|||
* min_value: pointer to an unsigned int64 which will be updated with the minimum value that commit could have. (cannot be NULL) |
|||
* max_value: pointer to an unsigned int64 which will be updated with the maximum value that commit could have. (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_rangeproof_rewind( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *blind_out, |
|||
uint64_t *value_out, |
|||
unsigned char *message_out, |
|||
int *outlen, |
|||
const unsigned char *nonce, |
|||
uint64_t *min_value, |
|||
uint64_t *max_value, |
|||
const unsigned char *commit, |
|||
const unsigned char *proof, |
|||
int plen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(6) SECP256K1_ARG_NONNULL(7) SECP256K1_ARG_NONNULL(8) SECP256K1_ARG_NONNULL(9) SECP256K1_ARG_NONNULL(10); |
|||
|
|||
/** Author a proof that a committed value is within a range.
|
|||
* Returns 1: Proof successfully created. |
|||
* 0: Error |
|||
* In: ctx: pointer to a context object, initialized for range-proof, signing, and Pedersen commitment (cannot be NULL) |
|||
* proof: pointer to array to receive the proof, can be up to 5134 bytes. (cannot be NULL) |
|||
 * min_value: constructs a proof where the verifier can tell the minimum value is at least the specified amount. |
|||
* commit: 33-byte array with the commitment being proved. |
|||
* blind: 32-byte blinding factor used by commit. |
|||
* nonce: 32-byte secret nonce used to initialize the proof (value can be reverse-engineered out of the proof if this secret is known.) |
|||
 * exp: Base-10 exponent. Digits below the 10^exp place will be made public, but the proof will be made smaller. Allowed range is -1 to 18. |
|||
* (-1 is a special case that makes the value public. 0 is the most private.) |
|||
 * min_bits: Number of bits of the value to keep private. (0 = auto/minimal, up to 64). |
|||
* value: Actual value of the commitment. |
|||
 * In/out: plen: pointer to an integer holding the size of the proof buffer on input, set to the size of the constructed proof on output. |
|||
* |
|||
* If min_value or exp is non-zero then the value must be on the range [0, 2^63) to prevent the proof range from spanning past 2^64. |
|||
* |
|||
 * If exp is -1 the value is revealed by the proof (e.g. it proves that the commitment is a blinding of a specific value, without revealing the blinding factor.) |
|||
* |
|||
* This can randomly fail with probability around one in 2^100. If this happens, buy a lottery ticket and retry with a different nonce or blinding. |
|||
* |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_rangeproof_sign( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *proof, |
|||
int *plen, |
|||
uint64_t min_value, |
|||
const unsigned char *commit, |
|||
const unsigned char *blind, |
|||
const unsigned char *nonce, |
|||
int exp, |
|||
int min_bits, |
|||
uint64_t value |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6) SECP256K1_ARG_NONNULL(7); |
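
/* Illustrative sketch tying the calls above together: commit to a value, prove it
 * is in range without revealing it, verify the proof, then rewind the proof with
 * the original nonce to recover the value and blinding factor. Buffer sizes follow
 * the documentation above (33-byte commitment, proof up to 5134 bytes, 4096-byte
 * message buffer). Assumes ctx was initialized for signing, Pedersen commitments
 * and range proofs; the function name is hypothetical. */
static int example_rangeproof_roundtrip(const secp256k1_context_t* ctx,
                                        unsigned char blind[32],
                                        const unsigned char nonce[32],
                                        uint64_t value) {
    unsigned char commit[33];
    unsigned char proof[5134];
    unsigned char msg[4096];
    unsigned char blind_out[32];
    int plen = (int)sizeof(proof);
    int msglen = 0;
    uint64_t min_value, max_value, value_out;

    if (!secp256k1_pedersen_commit(ctx, commit, blind, value)) return 0;
    /* min_value 0, exp 0, min_bits 0: most private, automatically sized proof. */
    if (!secp256k1_rangeproof_sign(ctx, proof, &plen, 0, commit, blind, nonce, 0, 0, value)) return 0;
    if (!secp256k1_rangeproof_verify(ctx, &min_value, &max_value, commit, proof, plen)) return 0;
    /* Only the holder of the signing nonce can open the proof again. */
    if (!secp256k1_rangeproof_rewind(ctx, blind_out, &value_out, msg, &msglen, nonce, &min_value, &max_value, commit, proof, plen)) return 0;
    return value_out == value;
}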
|||
|
|||
/** Extract some basic information from a range-proof.
|
|||
* Returns 1: Information successfully extracted. |
|||
* 0: Decode failed. |
|||
* In: ctx: pointer to a context object |
|||
* proof: pointer to character array with the proof. |
|||
* plen: length of proof in bytes. |
|||
* Out: exp: Exponent used in the proof (-1 means the value isn't private). |
|||
* mantissa: Number of bits covered by the proof. |
|||
* min_value: pointer to an unsigned int64 which will be updated with the minimum value that commit could have. (cannot be NULL) |
|||
* max_value: pointer to an unsigned int64 which will be updated with the maximum value that commit could have. (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_rangeproof_info( |
|||
const secp256k1_context_t* ctx, |
|||
int *exp, |
|||
int *mantissa, |
|||
uint64_t *min_value, |
|||
uint64_t *max_value, |
|||
const unsigned char *proof, |
|||
int plen |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); |
|||
|
|||
# ifdef __cplusplus |
|||
} |
|||
# endif |
|||
|
|||
#endif |
@ -0,0 +1,173 @@ |
|||
#ifndef _SECP256K1_SCHNORR_ |
|||
# define _SECP256K1_SCHNORR_ |
|||
|
|||
# include "secp256k1.h" |
|||
|
|||
# ifdef __cplusplus |
|||
extern "C" { |
|||
# endif |
|||
|
|||
/** Create a signature using a custom EC-Schnorr-SHA256 construction. It
|
|||
 * produces non-malleable 64-byte signatures which support public key recovery, |
|||
* batch validation, and multiparty signing. |
|||
* Returns: 1: signature created |
|||
* 0: the nonce generation function failed, or the private key was |
|||
* invalid. |
|||
* In: ctx: pointer to a context object, initialized for signing |
|||
* (cannot be NULL) |
|||
* msg32: the 32-byte message hash being signed (cannot be NULL) |
|||
* seckey: pointer to a 32-byte secret key (cannot be NULL) |
|||
* noncefp:pointer to a nonce generation function. If NULL, |
|||
* secp256k1_nonce_function_default is used |
|||
* ndata: pointer to arbitrary data used by the nonce generation |
|||
* function (can be NULL) |
|||
* Out: sig64: pointer to a 64-byte array where the signature will be |
|||
* placed (cannot be NULL) |
|||
*/ |
|||
int secp256k1_schnorr_sign( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
unsigned char *sig64, |
|||
const unsigned char *seckey, |
|||
secp256k1_nonce_function_t noncefp, |
|||
const void *ndata |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Verify a signature created by secp256k1_schnorr_sign.
|
|||
* Returns: 1: correct signature |
|||
* 0: incorrect signature |
|||
* In: ctx: a secp256k1 context object, initialized for verification. |
|||
* msg32: the 32-byte message hash being verified (cannot be NULL) |
|||
* sig64: the 64-byte signature being verified (cannot be NULL) |
|||
* pubkey: the public key to verify with (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_verify( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
const unsigned char *sig64, |
|||
const secp256k1_pubkey_t *pubkey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Recover an EC public key from a Schnorr signature created using
|
|||
* secp256k1_schnorr_sign. |
|||
* Returns: 1: public key successfully recovered (which guarantees a correct |
|||
* signature). |
|||
* 0: otherwise. |
|||
* In: ctx: pointer to a context object, initialized for |
|||
* verification (cannot be NULL) |
|||
* msg32: the 32-byte message hash assumed to be signed (cannot |
|||
* be NULL) |
|||
* sig64: signature as 64 byte array (cannot be NULL) |
|||
* Out: pubkey: pointer to a pubkey to set to the recovered public key |
|||
* (cannot be NULL). |
|||
*/ |
|||
int secp256k1_schnorr_recover( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
const unsigned char *sig64, |
|||
secp256k1_pubkey_t *pubkey |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); |
|||
|
|||
/** Generate a nonce pair deterministically for use with
|
|||
* secp256k1_schnorr_partial_sign. |
|||
* Returns: 1: valid nonce pair was generated. |
|||
* 0: otherwise (nonce generation function failed) |
|||
* In: ctx: pointer to a context object, initialized for signing |
|||
* (cannot be NULL) |
|||
* msg32: the 32-byte message hash assumed to be signed (cannot |
|||
* be NULL) |
|||
* sec32: the 32-byte private key (cannot be NULL) |
|||
* noncefp: pointer to a nonce generation function. If NULL, |
|||
* secp256k1_nonce_function_default is used |
|||
* noncedata: pointer to arbitrary data used by the nonce generation |
|||
* function (can be NULL) |
|||
* Out: pubnonce: public side of the nonce (cannot be NULL) |
|||
* privnonce32: private side of the nonce (32 byte) (cannot be NULL) |
|||
* |
|||
* Do not use the output as a private/public key pair for signing/validation. |
|||
*/ |
|||
int secp256k1_schnorr_generate_nonce_pair( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
const unsigned char *sec32, |
|||
secp256k1_nonce_function_t noncefp, |
|||
const void* noncedata, |
|||
secp256k1_pubkey_t *pubnonce, |
|||
unsigned char *privnonce32 |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(6) SECP256K1_ARG_NONNULL(7); |
|||
|
|||
/** Produce a partial Schnorr signature, which can be combined using
|
|||
* secp256k1_schnorr_partial_combine, to end up with a full signature that is |
|||
* verifiable using secp256k1_schnorr_verify. |
|||
 * Returns: 1: signature created successfully. |
|||
* 0: no valid signature exists with this combination of keys, nonces |
|||
* and message (chance around 1 in 2^128) |
|||
* -1: invalid private key, nonce, or public nonces. |
|||
* In: ctx: pointer to context object, initialized for signing (cannot |
|||
* be NULL) |
|||
* msg32: pointer to 32-byte message to sign |
|||
* sec32: pointer to 32-byte private key |
|||
* secnonce32: pointer to 32-byte array containing our nonce |
|||
* pubnonce_others: pointer to pubkey containing the sum of the other's |
|||
* nonces (see secp256k1_ec_pubkey_combine) |
|||
* Out: sig64: pointer to 64-byte array to put partial signature in |
|||
* |
|||
* The intended procedure for creating a multiparty signature is: |
|||
* - Each signer S[i] with private key x[i] and public key Q[i] runs |
|||
* secp256k1_schnorr_generate_nonce_pair to produce a pair (k[i],R[i]) of |
|||
* private/public nonces. |
|||
* - All signers communicate their public nonces to each other (revealing your |
|||
* private nonce can lead to discovery of your private key, so it should be |
|||
* considered secret). |
|||
* - All signers combine all the public nonces they received (excluding their |
|||
* own) using secp256k1_ec_pubkey_combine to obtain an |
|||
* Rall[i] = sum(R[0..i-1,i+1..n]). |
|||
* - All signers produce a partial signature using |
|||
* secp256k1_schnorr_partial_sign, passing in their own private key x[i], |
|||
* their own private nonce k[i], and the sum of the others' public nonces |
|||
* Rall[i]. |
|||
* - All signers communicate their partial signatures to each other. |
|||
* - Someone combines all partial signatures using |
|||
* secp256k1_schnorr_partial_combine, to obtain a full signature. |
|||
* - The resulting signature is validatable using secp256k1_schnorr_verify, with |
|||
* public key equal to the result of secp256k1_ec_pubkey_combine of the |
|||
* signers' public keys (sum(Q[0..n])). |
|||
* |
|||
* Note that secp256k1_schnorr_partial_combine and secp256k1_ec_pubkey_combine |
|||
 * functions take their arguments in any order, and it is possible to |
|||
* pre-combine several inputs already with one call, and add more inputs later |
|||
* by calling the function again (they are commutative and associative). |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_sign( |
|||
const secp256k1_context_t* ctx, |
|||
const unsigned char *msg32, |
|||
unsigned char *sig64, |
|||
const unsigned char *sec32, |
|||
const unsigned char *secnonce32, |
|||
const secp256k1_pubkey_t *pubnonce_others |
|||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); |
|||
|
|||
/** Combine multiple Schnorr partial signatures.
|
|||
 * Returns: 1: the passed signatures were successfully combined. |
|||
* 0: the resulting signature is not valid (chance of 1 in 2^256) |
|||
* -1: some inputs were invalid, or the signatures were not created |
|||
* using the same set of nonces |
|||
 * In: ctx: pointer to a context object |
|||
 * n: the number of signatures to combine (at least 1) |
|||
 * sig64sin: pointer to an array of n pointers to 64-byte input |
|||
 * signatures |
|||
 * Out: sig64: pointer to a 64-byte array to place the combined signature |
|||
 * (cannot be NULL) |
|||
*/ |
|||
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_combine( |
|||
const secp256k1_context_t* ctx, |
|||
unsigned char *sig64, |
|||
int n, |
|||
const unsigned char * const * sig64sin |
|||
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); |
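
/* Illustrative sketch of the two-party flow described above: each signer makes a
 * nonce pair, exchanges public nonces, produces a partial signature against the
 * other's public nonce, and either party combines the halves. The combined
 * signature verifies against the sum of the two public keys. Assumes ctx supports
 * both signing and verification; the function name is hypothetical. */
static int example_schnorr_two_party(const secp256k1_context_t* ctx,
                                     const unsigned char *msg32,
                                     const unsigned char *sec_a,
                                     const unsigned char *sec_b) {
    secp256k1_pubkey_t pub_a, pub_b, pubnonce_a, pubnonce_b, pub_combined;
    unsigned char secnonce_a[32], secnonce_b[32];
    unsigned char partial_a[64], partial_b[64], sig[64];
    const secp256k1_pubkey_t *pubs[2];
    const unsigned char *partials[2];

    if (!secp256k1_ec_pubkey_create(ctx, &pub_a, sec_a)) return 0;
    if (!secp256k1_ec_pubkey_create(ctx, &pub_b, sec_b)) return 0;
    if (!secp256k1_schnorr_generate_nonce_pair(ctx, msg32, sec_a, NULL, NULL, &pubnonce_a, secnonce_a)) return 0;
    if (!secp256k1_schnorr_generate_nonce_pair(ctx, msg32, sec_b, NULL, NULL, &pubnonce_b, secnonce_b)) return 0;

    /* Each signer signs with its own key and nonce, and the *other's* public nonce. */
    if (secp256k1_schnorr_partial_sign(ctx, msg32, partial_a, sec_a, secnonce_a, &pubnonce_b) != 1) return 0;
    if (secp256k1_schnorr_partial_sign(ctx, msg32, partial_b, sec_b, secnonce_b, &pubnonce_a) != 1) return 0;

    partials[0] = partial_a;
    partials[1] = partial_b;
    if (secp256k1_schnorr_partial_combine(ctx, sig, 2, partials) != 1) return 0;

    /* The combined signature validates against the combined public key. */
    pubs[0] = &pub_a;
    pubs[1] = &pub_b;
    if (!secp256k1_ec_pubkey_combine(ctx, &pub_combined, 2, pubs)) return 0;
    return secp256k1_schnorr_verify(ctx, msg32, sig, &pub_combined);
}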
|||
|
|||
# ifdef __cplusplus |
|||
} |
|||
# endif |
|||
|
|||
#endif |
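For orientation, the multiparty procedure described in the comment above can be driven end to end with just the two entry points declared in this header. The sketch below is illustrative only, not part of the diff: it assumes two signers whose nonce pairs were already produced with secp256k1_schnorr_generate_nonce_pair and whose public nonces were already exchanged, and the function name two_party_schnorr_sign and its parameter names are invented for the example.

/* Minimal two-signer sketch of the flow described above; not part of the
 * library. Each party signs with its own secret key and secret nonce plus
 * the sum of the OTHER parties' public nonces, then anyone combines the
 * partial signatures. */
static int two_party_schnorr_sign(const secp256k1_context_t *ctx,
                                  const unsigned char *msg32,
                                  const unsigned char *sec_a,
                                  const unsigned char *secnonce_a,
                                  const unsigned char *sec_b,
                                  const unsigned char *secnonce_b,
                                  const secp256k1_pubkey_t *pubnonce_a,
                                  const secp256k1_pubkey_t *pubnonce_b,
                                  unsigned char *sig64_out) {
    unsigned char part_a[64], part_b[64];
    const unsigned char *parts[2] = { part_a, part_b };

    /* Signer A: own key/nonce, B's public nonce (the "others" sum). */
    if (secp256k1_schnorr_partial_sign(ctx, msg32, part_a, sec_a, secnonce_a, pubnonce_b) != 1) {
        return 0; /* 0 or -1: pick fresh nonces or reject the inputs */
    }
    /* Signer B: own key/nonce, A's public nonce. */
    if (secp256k1_schnorr_partial_sign(ctx, msg32, part_b, sec_b, secnonce_b, pubnonce_a) != 1) {
        return 0;
    }
    /* The order of the partial signatures does not matter; combining is
     * commutative and associative, as noted above. */
    return secp256k1_schnorr_partial_combine(ctx, sig64_out, 2, parts) == 1;
}

The resulting sig64_out would then be checked with secp256k1_schnorr_verify against the combined public key of both signers.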
@ -0,0 +1,31 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2013, 2014 Pieter Wuille * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_TESTRAND_H_ |
|||
#define _SECP256K1_TESTRAND_H_ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
/* A non-cryptographic RNG used only for test infrastructure. */ |
|||
|
|||
/** Seed the pseudorandom number generator for testing. */ |
|||
SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16); |
|||
|
|||
/** Generate a pseudorandom 32-bit number. */ |
|||
static uint32_t secp256k1_rand32(void); |
|||
|
|||
/** Generate a pseudorandom 32-byte array. */ |
|||
static void secp256k1_rand256(unsigned char *b32); |
|||
|
|||
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ |
|||
static void secp256k1_rand256_test(unsigned char *b32); |
|||
|
|||
/** Generate a pseudorandom 64-bit integer in the range min..max, inclusive. */ |
|||
static int64_t secp256k1_rands64(uint64_t min, uint64_t max); |
|||
|
|||
#endif |
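As a quick illustration of how the test infrastructure is expected to drive these declarations (the helpers are static, so in practice this lives in the translation unit that includes testrand_impl.h), here is a hedged sketch; the all-zero seed and the example_ name are made up for the example.

/* Illustrative only; seed value and variable names are arbitrary. */
static void example_testrand_usage(void) {
    unsigned char seed16[16] = {0};      /* deterministic 16-byte seed */
    unsigned char buf32[32];
    uint32_t word;
    int64_t bounded;

    secp256k1_rand_seed(seed16);         /* seed once before drawing */
    word = secp256k1_rand32();           /* one pseudorandom 32-bit word */
    secp256k1_rand256(buf32);            /* fill a 32-byte buffer */
    secp256k1_rand256_test(buf32);       /* buffer with long runs of 0/1 bits */
    bounded = secp256k1_rands64(10, 20); /* inclusive range [10, 20] */
    (void)word; (void)bounded;
}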
@ -0,0 +1,77 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2013-2015 Pieter Wuille, Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_TESTRAND_IMPL_H_ |
|||
#define _SECP256K1_TESTRAND_IMPL_H_ |
|||
|
|||
#include <stdint.h> |
|||
#include <string.h> |
|||
|
|||
#include "testrand.h" |
|||
#include "hash.h" |
|||
|
|||
static secp256k1_rfc6979_hmac_sha256_t secp256k1_test_rng; |
|||
static uint32_t secp256k1_test_rng_precomputed[8]; |
|||
static int secp256k1_test_rng_precomputed_used = 8; |
|||
|
|||
SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) { |
|||
secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16); |
|||
} |
|||
|
|||
SECP256K1_INLINE static uint32_t secp256k1_rand32(void) { |
|||
if (secp256k1_test_rng_precomputed_used == 8) { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed)); |
|||
secp256k1_test_rng_precomputed_used = 0; |
|||
} |
|||
return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++]; |
|||
} |
|||
|
|||
static void secp256k1_rand256(unsigned char *b32) { |
|||
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32); |
|||
} |
|||
|
|||
static void secp256k1_rand256_test(unsigned char *b32) { |
|||
int bits=0; |
|||
uint64_t ent = 0; |
|||
int entleft = 0; |
|||
memset(b32, 0, 32); |
|||
while (bits < 256) { |
|||
int now; |
|||
uint32_t val; |
|||
if (entleft < 12) { |
|||
ent |= ((uint64_t)secp256k1_rand32()) << entleft; |
|||
entleft += 32; |
|||
} |
|||
now = 1 + ((ent % 64)*((ent >> 6) % 32)+16)/31; |
|||
val = 1 & (ent >> 11); |
|||
ent >>= 12; |
|||
entleft -= 12; |
|||
while (now > 0 && bits < 256) { |
|||
b32[bits / 8] |= val << (bits % 8); |
|||
now--; |
|||
bits++; |
|||
} |
|||
} |
|||
} |
|||
|
|||
SECP256K1_INLINE static int64_t secp256k1_rands64(uint64_t min, uint64_t max) { |
|||
uint64_t range; |
|||
uint64_t r; |
|||
uint64_t clz; |
|||
VERIFY_CHECK(max >= min); |
|||
if (max == min) { |
|||
return min; |
|||
} |
|||
range = max - min; |
|||
clz = secp256k1_clz64_var(range); |
|||
do { |
|||
r = ((uint64_t)secp256k1_rand32() << 32) | secp256k1_rand32(); |
|||
r >>= clz; |
|||
} while (r > range); |
|||
return min + (int64_t)r; |
|||
} |
|||
|
|||
#endif |
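One detail of secp256k1_rands64 above worth spelling out: it right-shifts each fresh 64-bit draw by the number of leading zero bits of range, so the candidate lies in the smallest power-of-two interval covering range, and it rejects anything larger. This avoids the modulo bias a plain "rand % (range+1)" would introduce. A self-contained sketch of the same technique, using a stand-in xorshift generator rather than the test RNG (demo_rand64 and demo_bounded are invented names), might look like this:

#include <stdint.h>

/* Stand-in RNG for illustration only (Marsaglia xorshift64); the real code
 * draws from secp256k1_rand32() twice per candidate. */
static uint64_t demo_rand64(void) {
    static uint64_t s = 88172645463325252ULL;
    s ^= s << 13; s ^= s >> 7; s ^= s << 17;
    return s;
}

/* Unbiased draw in [0, range]: shift down to the covering power of two,
 * then reject overshoots, mirroring the loop in secp256k1_rands64. */
static uint64_t demo_bounded(uint64_t range) {
    int clz = 0;
    uint64_t r;
    while (clz < 63 && (range >> (63 - clz)) == 0) {
        clz++;                     /* count leading zero bits of range */
    }
    do {
        r = demo_rand64() >> clz;  /* r lies in [0, 2^(64-clz) - 1] */
    } while (r > range);           /* e.g. range=5: r in [0,7], 6 and 7 rejected */
    return r;
}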
File diff suppressed because it is too large
@ -0,0 +1,133 @@ |
|||
/********************************************************************** |
|||
* Copyright (c) 2013-2015 Pieter Wuille, Gregory Maxwell * |
|||
* Distributed under the MIT software license, see the accompanying * |
|||
* file COPYING or http://www.opensource.org/licenses/mit-license.php.* |
|||
**********************************************************************/ |
|||
|
|||
#ifndef _SECP256K1_UTIL_H_ |
|||
#define _SECP256K1_UTIL_H_ |
|||
|
|||
#if defined HAVE_CONFIG_H |
|||
#include "libsecp256k1-config.h" |
|||
#endif |
|||
|
|||
#include <stdlib.h> |
|||
#include <stdint.h> |
|||
#include <stdio.h> |
|||
|
|||
typedef struct { |
|||
void (*fn)(const char *text, void* data); |
|||
void* data; |
|||
} callback_t; |
|||
|
|||
#ifdef DETERMINISTIC |
|||
#define TEST_FAILURE(msg) do { \ |
|||
fprintf(stderr, "%s\n", msg); \ |
|||
abort(); \ |
|||
} while(0); |
|||
#else |
|||
#define TEST_FAILURE(msg) do { \ |
|||
fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \ |
|||
abort(); \ |
|||
} while(0) |
|||
#endif |
|||
|
|||
#ifdef HAVE_BUILTIN_EXPECT |
|||
#define EXPECT(x,c) __builtin_expect((x),(c)) |
|||
#else |
|||
#define EXPECT(x,c) (x) |
|||
#endif |
|||
|
|||
#ifdef DETERMINISTIC |
|||
#define CHECK(cond) do { \ |
|||
if (EXPECT(!(cond), 0)) { \ |
|||
TEST_FAILURE("test condition failed"); \ |
|||
} \ |
|||
} while(0) |
|||
#else |
|||
#define CHECK(cond) do { \ |
|||
if (EXPECT(!(cond), 0)) { \ |
|||
TEST_FAILURE("test condition failed: " #cond); \ |
|||
} \ |
|||
} while(0) |
|||
#endif |
|||
|
|||
/* Like assert(), but when VERIFY is defined, and side-effect safe. */ |
|||
#ifdef VERIFY |
|||
#define VERIFY_CHECK CHECK |
|||
#define VERIFY_SETUP(stmt) do { stmt; } while(0) |
|||
#else |
|||
#define VERIFY_CHECK(cond) do { (void)(cond); } while(0) |
|||
#define VERIFY_SETUP(stmt) |
|||
#endif |
|||
|
|||
static SECP256K1_INLINE void *checked_malloc(const callback_t* cb, size_t size) { |
|||
void *ret = malloc(size); |
|||
if (ret == NULL) { |
|||
cb->fn("Out of memory", cb->data); |
|||
} |
|||
return ret; |
|||
} |
|||
|
|||
/* Extract the sign of an int64, take the abs and return a uint64, constant time. */ |
|||
SECP256K1_INLINE static int secp256k1_sign_and_abs64(uint64_t *out, int64_t in) { |
|||
uint64_t mask0, mask1; |
|||
int ret; |
|||
ret = in < 0; |
|||
mask0 = ret + ~((uint64_t)0); |
|||
mask1 = ~mask0; |
|||
*out = (uint64_t)in; |
|||
*out = (*out & mask0) | ((~*out + 1) & mask1); |
|||
return ret; |
|||
} |
|||
|
|||
SECP256K1_INLINE static int secp256k1_clz64_var(uint64_t x) { |
|||
int ret; |
|||
if (!x) { |
|||
return 64; |
|||
} |
|||
# if defined(HAVE_BUILTIN_CLZLL) |
|||
ret = __builtin_clzll(x); |
|||
# else |
|||
/*FIXME: debruijn fallback. */ |
|||
for (ret = 0; ((x & (1ULL << 63)) == 0); x <<= 1, ret++); |
|||
# endif |
|||
return ret; |
|||
|
|||
} |
|||
|
|||
/* Macro for restrict, when available and not in a VERIFY build. */ |
|||
#if defined(SECP256K1_BUILD) && defined(VERIFY) |
|||
# define SECP256K1_RESTRICT |
|||
#else |
|||
# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) |
|||
# if SECP256K1_GNUC_PREREQ(3,0) |
|||
# define SECP256K1_RESTRICT __restrict__ |
|||
# elif (defined(_MSC_VER) && _MSC_VER >= 1400) |
|||
# define SECP256K1_RESTRICT __restrict |
|||
# else |
|||
# define SECP256K1_RESTRICT |
|||
# endif |
|||
# else |
|||
# define SECP256K1_RESTRICT restrict |
|||
# endif |
|||
#endif |
|||
|
|||
#if defined(_WIN32) |
|||
# define I64FORMAT "I64d" |
|||
# define I64uFORMAT "I64u" |
|||
#else |
|||
# define I64FORMAT "lld" |
|||
# define I64uFORMAT "llu" |
|||
#endif |
|||
|
|||
#if defined(HAVE___INT128) |
|||
# if defined(__GNUC__) |
|||
# define SECP256K1_GNUC_EXT __extension__ |
|||
# else |
|||
# define SECP256K1_GNUC_EXT |
|||
# endif |
|||
SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; |
|||
#endif |
|||
|
|||
#endif |
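To make the intent of callback_t / checked_malloc and secp256k1_sign_and_abs64 concrete, a small usage sketch follows; the callback body and every example_ name are invented for illustration and are not part of this header.

#include <inttypes.h>

/* Illustrative error callback: report and abort. Real callers pick their
 * own policy; checked_malloc only invokes it when malloc returns NULL. */
static void example_error_cb(const char *text, void *data) {
    (void)data;
    fprintf(stderr, "fatal: %s\n", text);
    abort();
}

static void example_util_usage(void) {
    callback_t cb = { example_error_cb, NULL };
    unsigned char *buf = checked_malloc(&cb, 64); /* cb.fn fires on allocation failure */

    uint64_t abs_val;
    int negative = secp256k1_sign_and_abs64(&abs_val, -5);
    /* negative == 1, abs_val == 5; for input 7 it would be 0 and 7. */
    printf("sign=%d abs=%" PRIu64 "\n", negative, abs_val);

    CHECK(buf != NULL);   /* aborts with file/line context if the condition fails */
    free(buf);
}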
After Width: | Height: | Size: 2.5 KiB |
File diff suppressed because it is too large
@ -1,12 +0,0 @@ |
|||
{ |
|||
"program": { |
|||
"portable": { |
|||
"pnacl-translate": { |
|||
"url": "iguana.pexe" |
|||
}, |
|||
"pnacl-debug": { |
|||
"url": "iguana_unstripped.bc" |
|||
} |
|||
} |
|||
} |
|||
} |
Binary file not shown.
Binary file not shown.