store mnemonic words in python files instead of txt; this avoids packaging issues

Lex Berezhny 2018-09-17 22:37:00 -04:00
parent 94b920ab2a
commit eedc9bf9ed
13 changed files with 9843 additions and 9873 deletions

setup.py

@@ -1,5 +1,3 @@
-import os
-import re
 from setuptools import setup, find_packages
 import torba
@@ -26,7 +24,6 @@ setup(
         'Topic :: Utilities',
     ),
     packages=find_packages(exclude=('tests',)),
-    include_package_data=True,
     python_requires='>=3.6',
     install_requires=(
         'twisted',

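Dropping include_package_data follows from the change: once the word lists are importable modules rather than loose .txt data files, find_packages() ships them like any other source file, with no package-data machinery required. The .txt-to-.py conversion itself is not visible in this diff (the generated files are too large to render); a hypothetical one-off helper in that spirit, with made-up script name and paths, might look like:

# convert_words.py -- hypothetical helper, not part of this commit.
# Rewrites each torba/words/*.txt (one word per line) as a .py module
# exposing a module-level `words` tuple, the shape load_words() expects.
import io
import os

WORDS_DIR = os.path.join('torba', 'words')

for filename in os.listdir(WORDS_DIR):
    if not filename.endswith('.txt'):
        continue
    txt_path = os.path.join(WORDS_DIR, filename)
    with io.open(txt_path, 'r', encoding='utf-8') as f:
        # keep non-empty lines, stripped of surrounding whitespace
        words = [line.strip() for line in f if line.strip()]
    with io.open(txt_path[:-4] + '.py', 'w', encoding='utf-8') as f:
        f.write(u'words = (\n')
        for word in words:
            f.write(u"    '%s',\n" % word)
        f.write(u')\n')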
torba/mnemonic.py

@@ -1,11 +1,10 @@
 # Copyright (C) 2014 Thomas Voegtlin
 # Copyright (C) 2018 LBRY Inc.
 
-import os
-import io
 import hmac
 import math
 import hashlib
+import importlib
 import unicodedata
 import string
 from binascii import hexlify
@@ -77,28 +76,20 @@ def normalize_text(seed):
     return seed
 
 
-def load_words(filename):
-    path = os.path.join(os.path.dirname(__file__), 'words', filename)
-    with io.open(path, 'r', encoding='utf-8') as f:
-        s = f.read().strip()
-    s = unicodedata.normalize('NFKD', s)
-    lines = s.split('\n')
-    words = []
-    for line in lines:
-        line = line.split('#')[0]
-        line = line.strip(' \r')
-        assert ' ' not in line
-        if line:
-            words.append(line)
-    return words
+def load_words(language_name):
+    language_module = importlib.import_module('torba.words.'+language_name)
+    return list(map(
+        lambda s: unicodedata.normalize('NFKD', s),
+        language_module.words
+    ))
 
 
-FILE_NAMES = {
-    'en': 'english.txt',
-    'es': 'spanish.txt',
-    'ja': 'japanese.txt',
-    'pt': 'portuguese.txt',
-    'zh': 'chinese_simplified.txt'
+LANGUAGE_NAMES = {
+    'en': 'english',
+    'es': 'spanish',
+    'ja': 'japanese',
+    'pt': 'portuguese',
+    'zh': 'chinese_simplified'
 }
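Each module under torba/words/ exposes a module-level words sequence (the attribute name is confirmed by language_module.words above), which load_words() re-normalizes to NFKD on load. A truncated sketch of what torba/words/english.py presumably looks like, assuming a tuple literal; per the file listing below the real file is 2050 lines, i.e. all 2048 words plus the delimiters:

# torba/words/english.py (truncated sketch; full file has 2048 entries)
words = (
    'abandon',
    'ability',
    'able',
    # ... 2045 more entries ...
)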
@@ -107,8 +98,8 @@ class Mnemonic:
     # Mnemonic phrase uses a hash based checksum, instead of a words-dependent checksum
 
     def __init__(self, lang='en'):
-        filename = FILE_NAMES.get(lang, 'english.txt')
-        self.words = load_words(filename)
+        language_name = LANGUAGE_NAMES.get(lang, 'english')
+        self.words = load_words(language_name)
 
     @staticmethod
     def mnemonic_to_seed(mnemonic, passphrase=u''):

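Caller-facing behavior is unchanged by the rework; a minimal usage sketch, assuming the module lives at torba/mnemonic.py as the torba.words import path suggests:

# hypothetical usage; language codes resolve through LANGUAGE_NAMES above
from torba.mnemonic import Mnemonic

m = Mnemonic('ja')       # unknown codes fall back to 'english'
print(len(m.words))      # NFKD-normalized words imported from torba.words.japanese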
0 torba/words/__init__.py Normal file

torba/words/chinese_simplified.py Normal file
File diff suppressed because it is too large

torba/words/chinese_simplified.txt Deleted file
File diff suppressed because it is too large

2050 torba/words/english.py Normal file
File diff suppressed because it is too large

torba/words/english.txt Deleted file
File diff suppressed because it is too large

2050 torba/words/japanese.py Normal file
File diff suppressed because it is too large

torba/words/japanese.txt Deleted file
File diff suppressed because it is too large

1628 torba/words/portuguese.py Normal file
File diff suppressed because it is too large

torba/words/portuguese.txt Deleted file
File diff suppressed because it is too large

2050 torba/words/spanish.py Normal file
File diff suppressed because it is too large

torba/words/spanish.txt Deleted file
File diff suppressed because it is too large