forked from LBRYCommunity/lbry-sdk

refactored mnemonic.py

parent 391b95fd12
commit d488bfd9d4
9 changed files with 88 additions and 162 deletions

lbry/wallet/mnemonic.py
@@ -1,159 +1,57 @@
-# Copyright (C) 2014 Thomas Voegtlin
-# Copyright (C) 2018 LBRY Inc.
-
-import hmac
-import math
 import hashlib
-import importlib
+import asyncio
 import unicodedata
-import string
 from binascii import hexlify
-from secrets import randbelow
-
-import pbkdf2
+from secrets import randbits
 
 from lbry.crypto.hash import hmac_sha512
-from .words import english
-
-# The hash of the mnemonic seed must begin with this
-SEED_PREFIX = b'01'       # Standard wallet
-SEED_PREFIX_2FA = b'101'  # Two-factor authentication
-SEED_PREFIX_SW = b'100'   # Segwit wallet
-
-# http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
-CJK_INTERVALS = [
-    (0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
-    (0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
-    (0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
-    (0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
-    (0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
-    (0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
-    (0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
-    (0x3190, 0x319F, 'Kanbun'),
-    (0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
-    (0x2F00, 0x2FDF, 'CJK Radicals'),
-    (0x31C0, 0x31EF, 'CJK Strokes'),
-    (0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
-    (0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
-    (0x3100, 0x312F, 'Bopomofo'),
-    (0x31A0, 0x31BF, 'Bopomofo Extended'),
-    (0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
-    (0x3040, 0x309F, 'Hiragana'),
-    (0x30A0, 0x30FF, 'Katakana'),
-    (0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
-    (0x1B000, 0x1B0FF, 'Kana Supplement'),
-    (0xAC00, 0xD7AF, 'Hangul Syllables'),
-    (0x1100, 0x11FF, 'Hangul Jamo'),
-    (0xA960, 0xA97F, 'Hangul Jamo Extended A'),
-    (0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
-    (0x3130, 0x318F, 'Hangul Compatibility Jamo'),
-    (0xA4D0, 0xA4FF, 'Lisu'),
-    (0x16F00, 0x16F9F, 'Miao'),
-    (0xA000, 0xA48F, 'Yi Syllables'),
-    (0xA490, 0xA4CF, 'Yi Radicals'),
-]
+from . import words
 
 
-def is_cjk(c):
-    n = ord(c)
-    for start, end, _ in CJK_INTERVALS:
-        if start <= n <= end:
-            return True
-    return False
+def get_languages():
+    return words.languages
+
+
+def normalize(mnemonic: str) -> str:
+    return ' '.join(unicodedata.normalize('NFKD', mnemonic).lower().split())
+
+
+def is_valid(language, mnemonic):
+    local_words = getattr(words, language)
+    for word in normalize(mnemonic).split():
+        if word not in local_words:
+            return False
+    return bool(mnemonic)
 
 
-def normalize_text(seed):
-    seed = unicodedata.normalize('NFKD', seed)
-    seed = seed.lower()
-    # remove accents
-    seed = ''.join([c for c in seed if not unicodedata.combining(c)])
-    # normalize whitespaces
-    seed = ' '.join(seed.split())
-    # remove whitespaces between CJK
-    seed = ''.join([
-        seed[i] for i in range(len(seed))
-        if not (seed[i] in string.whitespace and is_cjk(seed[i-1]) and is_cjk(seed[i+1]))
-    ])
-    return seed
-
-
-def load_words(language_name):
-    if language_name == 'english':
-        return english.words
-    language_module = importlib.import_module('lbry.wallet.client.words.' + language_name)
-    return list(map(
-        lambda s: unicodedata.normalize('NFKD', s),
-        language_module.words
-    ))
-
-
-LANGUAGE_NAMES = {
-    'en': 'english',
-    'es': 'spanish',
-    'ja': 'japanese',
-    'pt': 'portuguese',
-    'zh': 'chinese_simplified'
-}
-
-
-class Mnemonic:
-    # Seed derivation no longer follows BIP39.
-    # The mnemonic phrase uses a hash-based checksum, instead of a wordlist-dependent checksum.
-
-    def __init__(self, lang='en'):
-        language_name = LANGUAGE_NAMES.get(lang, 'english')
-        self.words = load_words(language_name)
-
-    @staticmethod
-    def mnemonic_to_seed(mnemonic, passphrase=''):
-        pbkdf2_rounds = 2048
-        mnemonic = normalize_text(mnemonic)
-        passphrase = normalize_text(passphrase)
-        return pbkdf2.PBKDF2(
-            mnemonic, passphrase, iterations=pbkdf2_rounds, macmodule=hmac, digestmodule=hashlib.sha512
-        ).read(64)
-
-    def mnemonic_encode(self, i):
-        n = len(self.words)
-        words = []
-        while i:
-            x = i % n
-            i = i // n
-            words.append(self.words[x])
-        return ' '.join(words)
-
-    def mnemonic_decode(self, seed):
-        n = len(self.words)
-        words = seed.split()
-        i = 0
-        while words:
-            word = words.pop()
-            k = self.words.index(word)
-            i = i * n + k
-        return i
-
-    def make_seed(self, prefix=SEED_PREFIX, num_bits=132):
-        # increase num_bits in order to obtain a uniform distribution for the last word
-        bpw = math.log(len(self.words), 2)
-        # rounding
-        n = int(math.ceil(num_bits / bpw) * bpw)
-        entropy = 1
-        while 0 < entropy < pow(2, n - bpw):
-            # try again if seed would not contain enough words
-            entropy = randbelow(pow(2, n))
-        nonce = 0
-        while True:
-            nonce += 1
-            i = entropy + nonce
-            seed = self.mnemonic_encode(i)
-            if i != self.mnemonic_decode(seed):
-                raise Exception('Cannot extract same entropy from mnemonic!')
-            if is_new_seed(seed, prefix):
-                break
-        return seed
+def sync_generate(language: str) -> str:
+    local_words = getattr(words, language)
+    entropy = randbits(132)
+    nonce = 0
+    while True:
+        nonce += 1
+        i = entropy + nonce
+        w = []
+        while i:
+            w.append(local_words[i % 2048])
+            i //= 2048
+        seed = ' '.join(w)
+        if hexlify(hmac_sha512(b"Seed version", seed.encode())).startswith(b"01"):
+            break
+    return seed
 
 
-def is_new_seed(seed, prefix):
-    seed = normalize_text(seed)
-    seed_hash = hexlify(hmac_sha512(b"Seed version", seed.encode('utf8')))
-    return seed_hash.startswith(prefix)
+def sync_to_seed(mnemonic: str) -> bytes:
+    return hashlib.pbkdf2_hmac('sha512', normalize(mnemonic).encode(), b'lbryum', 2048)
+
+
+async def generate(language: str) -> str:
+    return await asyncio.get_running_loop().run_in_executor(
+        None, sync_generate, language
+    )
+
+
+async def to_seed(mnemonic: str) -> bytes:
+    return await asyncio.get_running_loop().run_in_executor(
+        None, sync_to_seed, mnemonic
+    )
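
A minimal usage sketch of the new module-level API (assuming the module is importable as lbry.wallet.mnemonic, the path the updated tests import from; the main() wrapper is illustrative only, not part of the commit):

    import asyncio
    from lbry.wallet import mnemonic

    async def main():
        # generate() grinds random entropy until the phrase's seed-version
        # hash starts with b"01" (see sync_generate above)
        phrase = await mnemonic.generate('en')
        assert mnemonic.is_valid('en', phrase)
        # to_seed() stretches the phrase with PBKDF2-HMAC-SHA512,
        # salt b'lbryum', 2048 iterations, yielding 64 bytes
        seed = await mnemonic.to_seed(phrase)
        print(len(phrase.split()), 'words ->', seed.hex())

    asyncio.run(main())

Both coroutines push their CPU-bound sync counterparts onto the default executor, so the event loop stays responsive while the PBKDF2 stretching or the nonce grinding runs.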

lbry/wallet/words/__init__.py
@@ -0,0 +1,8 @@
+from .english import words as en
+from .french import words as fr
+from .italian import words as it
+from .japanese import words as ja
+from .portuguese import words as pt
+from .spanish import words as es
+from .chinese_simplified import words as zh
+languages = 'en', 'fr', 'it', 'ja', 'pt', 'es', 'zh'
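
Each of these wordlists holds 2048 entries (the updated tests assert exactly that), so every word encodes 11 bits; that is why sync_generate draws randbits(132), since 2048**12 == 2**132 and a 132-bit value fills at most 12 words. A small standalone sketch of the div-mod encoding step, using a placeholder wordlist rather than the real ones:

    from secrets import randbits

    local_words = ['w%04d' % n for n in range(2048)]  # placeholder 2048-entry list

    def encode(i: int) -> str:
        # peel off 11 bits (one word) per iteration, mirroring sync_generate
        w = []
        while i:
            w.append(local_words[i % 2048])
            i //= 2048
        return ' '.join(w)

    assert len(encode(randbits(132)).split()) <= 12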
lbry/wallet/words/french.py (new file, 1 line)
File diff suppressed because one or more lines are too long

lbry/wallet/words/italian.py (new file, 1 line)
File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long

@@ -1,23 +1,42 @@
-import unittest
+from unittest import TestCase
 from binascii import hexlify
 
-from lbry.wallet.mnemonic import Mnemonic
+from lbry.wallet import words
+from lbry.wallet.mnemonic import (
+    get_languages, is_valid,
+    sync_generate as generate,
+    sync_to_seed as to_seed
+)
 
 
-class TestMnemonic(unittest.TestCase):
+class TestMnemonic(TestCase):
 
-    def test_mnemonic_to_seed(self):
-        seed = Mnemonic.mnemonic_to_seed(mnemonic='foobar', passphrase='torba')
-        self.assertEqual(
-            hexlify(seed),
-            b'475a419db4e991cab14f08bde2d357e52b3e7241f72c6d8a2f92782367feeee9f403dc6a37c26a3f02ab9'
-            b'dec7f5063161eb139cea00da64cd77fba2f07c49ddc'
-        )
+    def test_get_languages(self):
+        languages = get_languages()
+        self.assertEqual(len(languages), 6)
+        for lang in languages:
+            self.assertEqual(len(getattr(words, lang)), 2048)
+
+    def test_is_valid(self):
+        self.assertFalse(is_valid('en', ''))
+        self.assertFalse(is_valid('en', 'foo'))
+        self.assertFalse(is_valid('en', 'awesomeball'))
+        self.assertTrue(is_valid('en', 'awesome ball'))
+
+        # check normalize works (these are not the same)
+        self.assertTrue(is_valid('ja', 'るいじ りんご'))
+        self.assertTrue(is_valid('ja', 'るいじ りんご'))
+
+    def test_generate(self):
+        self.assertGreaterEqual(len(generate('en').split()), 11)
+        self.assertGreaterEqual(len(generate('ja').split()), 11)
+
+    def test_to_seed(self):
+        self.assertEqual(
+            hexlify(to_seed(
+                "carbon smart garage balance margin twelve che"
+                "st sword toast envelope bottom stomach absent"
+            )),
+            b'919455c9f65198c3b0f8a2a656f13bd0ecc436abfabcb6a2a1f063affbccb628'
+            b'230200066117a30b1aa3aec2800ddbd3bf405f088dd7c98ba4f25f58d47e1baf'
+        )
-
-    def test_make_seed_decode_encode(self):
-        iters = 10
-        m = Mnemonic('en')
-        for _ in range(iters):
-            seed = m.make_seed()
-            i = m.mnemonic_decode(seed)
-            self.assertEqual(m.mnemonic_encode(i), seed)
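
For reference, a self-contained sketch of the hash-based checksum that the Mnemonic class comment above refers to, using the standard library in place of lbry.crypto.hash.hmac_sha512 (assumed here to be plain HMAC-SHA512):

    import hmac
    import hashlib
    from binascii import hexlify

    def seed_version_matches(phrase: str, prefix: bytes = b'01') -> bool:
        # a phrase is accepted once the hex digest of its HMAC-SHA512 under
        # the fixed key b"Seed version" starts with the wallet-type prefix
        digest = hmac.new(b'Seed version', phrase.encode(), hashlib.sha512).digest()
        return hexlify(digest).startswith(prefix)

Since b'01' pins the first two hex characters (8 bits), the nonce loop in sync_generate needs about 256 attempts on average before a candidate phrase passes.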