|
|
@ -21,6 +21,7 @@ import hmac |
|
|
|
import math |
|
|
|
import hashlib |
|
|
|
import unicodedata |
|
|
|
import string |
|
|
|
|
|
|
|
import ecdsa |
|
|
|
import pbkdf2 |
|
|
@ -37,13 +38,10 @@ filenames = { |
|
|
|
'pt':'portuguese.txt', |
|
|
|
} |
|
|
|
|
|
|
|
def remove_accents(input_str):
    """Return *input_str* with all combining marks (accents) removed.

    The text is first decomposed with NFKD so that each accented
    character splits into a base character plus combining marks; the
    combining marks are then filtered out.
    """
    decomposed = unicodedata.normalize('NFKD', unicode(input_str))
    return u''.join(ch for ch in decomposed if not unicodedata.combining(ch))
|
|
|
|
|
|
|
|
|
|
|
class Mnemonic(object): |
|
|
|
# Seed derivation no longer follows BIP39 (an earlier revision of this
# code did follow it; the stale "follows BIP39" comment was merge residue)
|
|
|
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum |
|
|
|
|
|
|
|
def __init__(self, lang='en'): |
|
|
@ -68,9 +66,10 @@ class Mnemonic(object): |
|
|
|
|
|
|
|
@classmethod
def prepare_seed(self, seed):
    """Normalize a seed phrase into canonical form for derivation.

    Steps:
      1. NFKD-normalize, so accented characters decompose into a base
         character followed by combining marks.
      2. Drop every combining mark (accent) and every whitespace
         character, so accent typos and spacing differences do not
         change the resulting seed.

    NOTE(review): the first parameter of this classmethod is named
    ``self`` in the original source; kept as-is for compatibility.
    A botched merge had also left the older pre-pass
    (``remove_accents(seed.strip())`` + a second NFKD) in front of
    these steps; it was redundant — NFKD is idempotent, and the filter
    below already removes all combining marks and all whitespace — so
    it has been dropped without changing behavior.
    """
    # normalize
    seed = unicodedata.normalize('NFKD', unicode(seed))
    # remove accents and whitespaces
    seed = u''.join([c for c in seed
                     if not unicodedata.combining(c)
                     and c not in string.whitespace])
    return seed
|
|
|
|
|
|
|
def mnemonic_encode(self, i): |
|
|
@ -113,7 +112,7 @@ class Mnemonic(object): |
|
|
|
assert i == self.mnemonic_decode(seed) |
|
|
|
if is_old_seed(seed): |
|
|
|
continue |
|
|
|
if is_new_seed(self.prepare_seed(seed), prefix): |
|
|
|
if is_new_seed(seed, prefix): |
|
|
|
break |
|
|
|
print_error('%d words'%len(seed.split())) |
|
|
|
return seed |
|
|
|