[Python-modules-commits] [python-mnemonic] 05/08: Import python-mnemonic_0.15.orig.tar.gz
Tristan Seligmann
mithrandi at moszumanska.debian.org
Sun Nov 20 21:54:00 UTC 2016
This is an automated email from the git hooks/post-receive script.
mithrandi pushed a commit to branch master
in repository python-mnemonic.
commit 359482b4c39554de6a304c54b2bdd8edf9748baf
Author: Tristan Seligmann <mithrandi at mithrandi.net>
Date: Sun Nov 20 23:13:09 2016 +0200
Import python-mnemonic_0.15.orig.tar.gz
---
 PKG-INFO                       |    2 +-
 README                         |   33 -
 README.rst                     |   24 +
 mnemonic.egg-info/PKG-INFO     |    2 +-
 mnemonic.egg-info/SOURCES.txt  |    4 +-
 mnemonic/__init__.py           |    1 +
 mnemonic/mnemonic.py           |  215 ++++---
 mnemonic/secretsharing.py      |  108 ++++
 mnemonic/shamir.py             |   72 +++
 mnemonic/wordlist/japanese.txt | 1288 ++++++++++++++++++++--------------------
 setup.py                       |    2 +-
 11 files changed, 987 insertions(+), 764 deletions(-)
diff --git a/PKG-INFO b/PKG-INFO
index 1a637a0..8de08a0 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: mnemonic
-Version: 0.12
+Version: 0.15
Summary: Implementation of Bitcoin BIP-0039
Home-page: https://github.com/trezor/python-mnemonic
Author: Bitcoin TREZOR
diff --git a/README b/README
deleted file mode 100644
index dc5a179..0000000
--- a/README
+++ /dev/null
@@ -1,33 +0,0 @@
-<pre>
- BIP: BIP-0039
- Title: Mnemonic code for generating deterministic keys
- Authors: Marek Palatinus <slush at satoshilabs.com>
- Pavol Rusnak <stick at satoshilabs.com>
- ThomasV <thomasv at bitcointalk.org>
- Aaron Voisine <voisine at gmail.com>
- Sean Bowe <ewillbefull at gmail.com>
- Status: Draft
- Type: Standards Track
- Created: 10-09-2013
-</pre>
-
-==Abstract==
-
-This BIP describes the implementation of a mnemonic code or mnemonic sentence --
-a group of easy to remember words -- for the generation of deterministic wallets.
-
-It consists of two parts: generating the mnenomic, and converting it into a
-binary seed. This seed can be later used to generate deterministic wallets using
-BIP-0032 or similar methods.
-
-==BIP paper==
-
-See https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki for full
-specification
-
-==Reference Implementation==
-
-Reference implementation including wordlists is available from
-
-http://github.com/trezor/python-mnemonic
-
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..975343e
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,24 @@
+python-mnemonic
+===============
+
+.. image:: https://travis-ci.org/trezor/python-mnemonic.svg?branch=master
+ :target: https://travis-ci.org/trezor/python-mnemonic
+
+Reference implementation of BIP-0039: Mnemonic code for generating
+deterministic keys
+
+Abstract
+--------
+
+This BIP describes the implementation of a mnemonic code or mnemonic sentence --
+a group of easy to remember words -- for the generation of deterministic wallets.
+
+It consists of two parts: generating the mnemonic, and converting it into a
+binary seed. This seed can be later used to generate deterministic wallets using
+BIP-0032 or similar methods.
+
+BIP Paper
+---------
+
+See https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
+for full specification
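[Editor's note] The README abstract above describes the two parts of BIP-0039: generating the mnemonic sentence and converting it into a binary seed. A minimal usage sketch of the Mnemonic class introduced later in this commit; the 'english' wordlist and the 'TREZOR' passphrase are assumptions for illustration and are not part of this diff:

    # Sketch only, not from this commit: assumes the package is installed as
    # "mnemonic" and that an english.txt wordlist ships next to japanese.txt.
    import binascii
    from mnemonic import Mnemonic

    m = Mnemonic('english')
    words = m.generate(strength=128)      # 128 bits of entropy -> 12-word sentence
    assert m.check(words)                 # verifies the embedded checksum
    seed = Mnemonic.to_seed(words, passphrase='TREZOR')  # PBKDF2, 2048 rounds
    print(words)
    print(binascii.hexlify(seed))         # 64-byte seed for BIP-0032 derivation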
diff --git a/mnemonic.egg-info/PKG-INFO b/mnemonic.egg-info/PKG-INFO
index 1a637a0..8de08a0 100644
--- a/mnemonic.egg-info/PKG-INFO
+++ b/mnemonic.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: mnemonic
-Version: 0.12
+Version: 0.15
Summary: Implementation of Bitcoin BIP-0039
Home-page: https://github.com/trezor/python-mnemonic
Author: Bitcoin TREZOR
diff --git a/mnemonic.egg-info/SOURCES.txt b/mnemonic.egg-info/SOURCES.txt
index 0f841d8..a7f201e 100644
--- a/mnemonic.egg-info/SOURCES.txt
+++ b/mnemonic.egg-info/SOURCES.txt
@@ -1,7 +1,9 @@
-README
+README.rst
setup.py
mnemonic/__init__.py
mnemonic/mnemonic.py
+mnemonic/secretsharing.py
+mnemonic/shamir.py
mnemonic.egg-info/PKG-INFO
mnemonic.egg-info/SOURCES.txt
mnemonic.egg-info/dependency_links.txt
diff --git a/mnemonic/__init__.py b/mnemonic/__init__.py
index 6adf6b8..094258b 100644
--- a/mnemonic/__init__.py
+++ b/mnemonic/__init__.py
@@ -1 +1,2 @@
from .mnemonic import Mnemonic
+from .shamir import Shamir
diff --git a/mnemonic/mnemonic.py b/mnemonic/mnemonic.py
index 4eaaf55..374f11b 100644
--- a/mnemonic/mnemonic.py
+++ b/mnemonic/mnemonic.py
@@ -20,97 +20,146 @@
#
import binascii
+import bisect
import hashlib
import hmac
+import itertools
import os
import sys
import unicodedata
-
from pbkdf2 import PBKDF2
PBKDF2_ROUNDS = 2048
+class ConfigurationError(Exception):
+ pass
+
+# From <http://tinyurl.com/p54ocsk>
+def binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi
+ hi = hi if hi is not None else len(a) # hi defaults to len(a)
+ pos = bisect.bisect_left(a, x, lo, hi) # find insertion position
+ return (pos if pos != hi and a[pos] == x else -1) # don't walk off the end
class Mnemonic(object):
- def __init__(self, language):
- self.radix = 2048
- with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:
- self.wordlist = [w.strip() for w in f.readlines()]
- if len(self.wordlist) != self.radix:
- raise Exception('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))
-
- @classmethod
- def _get_directory(cls):
- return os.path.join(os.path.dirname(__file__), 'wordlist')
-
- @classmethod
- def list_languages(cls):
- return [ f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt') ]
-
- @classmethod
- def normalize_string(cls, txt):
- if isinstance(txt, str if sys.version < '3' else bytes):
- utxt = txt.decode('utf8')
- elif isinstance(txt, unicode if sys.version < '3' else str):
- utxt = txt
- else:
- raise Exception("String value expected")
-
- return unicodedata.normalize('NFKD', utxt)
-
- @classmethod
- def detect_language(cls, code):
- first = code.split(' ')[0]
- languages = cls.list_languages()
-
- for lang in languages:
- mnemo = cls(lang)
- if first in mnemo.wordlist:
- return lang
-
- raise Exception("Language not detected")
-
- def generate(self, strength = 128):
- if strength % 32 > 0:
- raise Exception('Strength should be divisible by 32, but it is not (%d).' % strength)
- return self.to_mnemonic(os.urandom(strength // 8))
-
- def to_mnemonic(self, data):
- if len(data) % 4 > 0:
- raise Exception('Data length in bits should be divisible by 32, but it is not (%d bytes = %d bits).' % (len(data), len(data) * 8))
- h = hashlib.sha256(data).hexdigest()
- b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \
- bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]
- result = []
- for i in range(len(b) // 11):
- idx = int(b[i * 11:(i + 1) * 11], 2)
- result.append(self.wordlist[idx])
- if self.detect_language(' '.join(result)) == 'japanese': # Japanese must be joined by ideographic space.
- result_phrase = '\xe3\x80\x80'.join(result)
- else:
- result_phrase = ' '.join(result)
- return result_phrase
-
- def check(self, mnemonic):
- if self.detect_language(mnemonic.replace('\xe3\x80\x80', ' ')) == 'japanese':
- mnemonic = mnemonic.replace('\xe3\x80\x80', ' ') # Japanese will likely input with ideographic space.
- mnemonic = mnemonic.split(' ')
- if len(mnemonic) % 3 > 0:
- return False
- try:
- idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
- b = ''.join(idx)
- except:
- return False
- l = len(b)
- d = b[:l // 33 * 32]
- h = b[-l // 33:]
- nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))
- nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]
- return h == nh
-
- @classmethod
- def to_seed(cls, mnemonic, passphrase = ''):
- mnemonic = cls.normalize_string(mnemonic)
- passphrase = cls.normalize_string(passphrase)
- return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)
+ def __init__(self, language):
+ self.radix = 2048
+ with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:
+ self.wordlist = [w.strip() for w in f.readlines()]
+ if len(self.wordlist) != self.radix:
+ raise ConfigurationError('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))
+
+ @classmethod
+ def _get_directory(cls):
+ return os.path.join(os.path.dirname(__file__), 'wordlist')
+
+ @classmethod
+ def list_languages(cls):
+ return [f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt')]
+
+ @classmethod
+ def normalize_string(cls, txt):
+ if isinstance(txt, str if sys.version < '3' else bytes):
+ utxt = txt.decode('utf8')
+ elif isinstance(txt, unicode if sys.version < '3' else str):
+ utxt = txt
+ else:
+ raise TypeError("String value expected")
+
+ return unicodedata.normalize('NFKD', utxt)
+
+ @classmethod
+ def detect_language(cls, code):
+ first = code.split(' ')[0]
+ languages = cls.list_languages()
+
+ for lang in languages:
+ mnemo = cls(lang)
+ if first in mnemo.wordlist:
+ return lang
+
+ raise ConfigurationError("Language not detected")
+
+ def generate(self, strength=128):
+ if strength % 32 > 0:
+ raise ValueError('Strength should be divisible by 32, but it is not (%d).' % strength)
+ return self.to_mnemonic(os.urandom(strength // 8))
+
+ # Adapted from <http://tinyurl.com/oxmn476>
+ def to_entropy(self, words):
+ if not isinstance(words, list):
+ words = words.split(' ')
+ if len(words) % 3 > 0:
+ raise ValueError('Word list size must be multiple of three words.')
+ # Look up all the words in the list and construct the
+ # concatenation of the original entropy and the checksum.
+ concatLenBits = len(words) * 11
+ concatBits = [False] * concatLenBits
+ wordindex = 0
+ for word in words:
+ # Find the words index in the wordlist
+ ndx = binary_search(self.wordlist, word)
+ if ndx < 0:
+ raise LookupError('Unable to find "%s" in word list.' % word)
+ # Set the next 11 bits to the value of the index.
+ for ii in range(11):
+ concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0
+ wordindex += 1
+ checksumLengthBits = concatLenBits // 33
+ entropyLengthBits = concatLenBits - checksumLengthBits
+ # Extract original entropy as bytes.
+ entropy = bytearray(entropyLengthBits // 8)
+ for ii in range(len(entropy)):
+ for jj in range(8):
+ if concatBits[(ii * 8) + jj]:
+ entropy[ii] |= 1 << (7 - jj)
+ # Take the digest of the entropy.
+ hashBytes = hashlib.sha256(entropy).digest()
+ if sys.version < '3':
+ hashBits = list(itertools.chain.from_iterable(([ord(c) & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))
+ else:
+ hashBits = list(itertools.chain.from_iterable(([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))
+ # Check all the checksum bits.
+ for i in range(checksumLengthBits):
+ if concatBits[entropyLengthBits + i] != hashBits[i]:
+ raise ValueError('Failed checksum.')
+ return entropy
+
+ def to_mnemonic(self, data):
+ if len(data) % 4 > 0:
+ raise ValueError('Data length in bits should be divisible by 32, but it is not (%d bytes = %d bits).' % (len(data), len(data) * 8))
+ h = hashlib.sha256(data).hexdigest()
+ b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \
+ bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]
+ result = []
+ for i in range(len(b) // 11):
+ idx = int(b[i * 11:(i + 1) * 11], 2)
+ result.append(self.wordlist[idx])
+ if self.detect_language(' '.join(result)) == 'japanese': # Japanese must be joined by ideographic space.
+ result_phrase = u'\xe3\x80\x80'.join(result)
+ else:
+ result_phrase = ' '.join(result)
+ return result_phrase
+
+ def check(self, mnemonic):
+ if self.detect_language(mnemonic.replace(u'\xe3\x80\x80', ' ')) == 'japanese':
+ mnemonic = mnemonic.replace(u'\xe3\x80\x80', ' ') # Japanese will likely input with ideographic space.
+ mnemonic = mnemonic.split(' ')
+ if len(mnemonic) % 3 > 0:
+ return False
+ try:
+ idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
+ b = ''.join(idx)
+ except:
+ return False
+ l = len(b)
+ d = b[:l // 33 * 32]
+ h = b[-l // 33:]
+ nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))
+ nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]
+ return h == nh
+
+ @classmethod
+ def to_seed(cls, mnemonic, passphrase=''):
+ mnemonic = cls.normalize_string(mnemonic)
+ passphrase = cls.normalize_string(passphrase)
+ return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)
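[Editor's note] Version 0.15 adds the to_entropy() method above as the inverse of to_mnemonic(). A hedged round-trip sketch; the 'english' wordlist is assumed and the entropy is random, for illustration only:

    # Sketch only, not part of this commit; assumes an english wordlist is installed.
    import os
    from mnemonic import Mnemonic

    m = Mnemonic('english')
    data = os.urandom(16)            # multiple of 4 bytes, as to_mnemonic() requires
    words = m.to_mnemonic(data)      # 12 words: 128 entropy bits + 4 checksum bits
    recovered = m.to_entropy(words)  # bytearray; raises ValueError on a bad checksum
    assert bytearray(data) == recovered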
diff --git a/mnemonic/secretsharing.py b/mnemonic/secretsharing.py
new file mode 100644
index 0000000..1a71b33
--- /dev/null
+++ b/mnemonic/secretsharing.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+"""
+ Secret Sharing
+ ~~~~~
+
+ :copyright: (c) 2014 by Halfmoon Labs
+ :license: MIT, see LICENSE for more details.
+"""
+
+from random import randint
+
+def egcd(a, b):
+ if a == 0:
+ return (b, 0, 1)
+ else:
+ g, y, x = egcd(b % a, a)
+ return (g, x - (b // a) * y, y)
+
+def mod_inverse(k, prime):
+ k = k % prime
+ if k < 0:
+ r = egcd(prime, -k)[2]
+ else:
+ r = egcd(prime, k)[2]
+ return (prime + r) % prime
+
+def random_polynomial(degree, intercept, upper_bound):
+ """ Generates a random polynomial with positive coefficients.
+ """
+ if degree < 0:
+ raise ValueError('Degree must be a non-negative number.')
+ coefficients = [intercept]
+ for i in range(degree):
+ random_coeff = randint(0, upper_bound-1)
+ coefficients.append(random_coeff)
+ return coefficients
+
+def get_polynomial_points(coefficients, num_points, prime):
+ """ Calculates the first n polynomial points.
+ [ (1, f(1)), (2, f(2)), ... (n, f(n)) ]
+ """
+ points = []
+ for x in range(1, num_points+1):
+ # start with x=1 and calculate the value of y
+ y = coefficients[0]
+ # calculate each term and add it to y, using modular math
+ for i in range(1, len(coefficients)):
+ exponentiation = (x**i) % prime
+ term = (coefficients[i] * exponentiation) % prime
+ y = (y + term) % prime
+ # add the point to the list of points
+ points.append((x, y))
+ return points
+
+def modular_lagrange_interpolation(x, points, prime):
+ # break the points up into lists of x and y values
+ x_values, y_values = zip(*points)
+ # initialize f(x) and begin the calculation: f(x) = SUM( y_i * l_i(x) )
+ f_x = 0
+ for i in range(len(points)):
+ # evaluate the lagrange basis polynomial l_i(x)
+ numerator, denominator = 1, 1
+ for j in range(len(points)):
+ # don't compute a polynomial fraction if i equals j
+ if i == j:
+ continue
+ # compute a fraction and update the existing numerator + denominator
+ numerator = (numerator * (x - x_values[j])) % prime
+ denominator = (denominator * (x_values[i] - x_values[j])) % prime
+ # get the polynomial from the numerator + mod inverse of the denominator
+ lagrange_polynomial = numerator * mod_inverse(denominator, prime)
+ # multiply the current y and the evaluated polynomial and add it to f(x)
+ f_x = (prime + f_x + (y_values[i] * lagrange_polynomial)) % prime
+ return f_x
+
+def secret_int_to_points(secret_int, point_threshold, num_points, prime):
+ """ Split a secret (integer) into shares (pair of integers / x,y coords).
+
+ Sample the points of a random polynomial with the y intercept equal to
+ the secret int.
+ """
+ if point_threshold < 2:
+ raise ValueError("Threshold must be >= 2.")
+ if point_threshold > num_points:
+ raise ValueError("Threshold must be < the total number of points.")
+ if secret_int > prime:
+ raise ValueError("Error! Secret is too long for share calculation!")
+ coefficients = random_polynomial(point_threshold-1, secret_int, prime)
+ points = get_polynomial_points(coefficients, num_points, prime)
+ return points
+
+def points_to_secret_int(points, prime):
+ """ Join int points into a secret int.
+
+ Get the intercept of a random polynomial defined by the given points.
+ """
+ if not isinstance(points, list):
+ raise ValueError("Points must be in list form.")
+ for point in points:
+ if not isinstance(point, tuple) and len(point) == 2:
+ raise ValueError("Each point must be a tuple of two values.")
+ if not isinstance(point[0], int) and \
+ isinstance(point[1], int):
+ raise ValueError("Each value in the point must be an int.")
+ x_values, y_values = zip(*points)
+ free_coefficient = modular_lagrange_interpolation(0, points, prime)
+ secret_int = free_coefficient # the secret int is the free coefficient
+ return secret_int
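[Editor's note] secretsharing.py above samples a random polynomial whose y-intercept is the secret and recovers that intercept by modular Lagrange interpolation at x = 0. A hedged worked sketch; the prime and the secret are illustrative values, not ones used elsewhere in this package:

    # Illustrative values only; 2**127 - 1 is a Mersenne prime, not a package default.
    from mnemonic.secretsharing import secret_int_to_points, points_to_secret_int

    prime = 2**127 - 1
    secret = 123456789
    points = secret_int_to_points(secret, point_threshold=3, num_points=5, prime=prime)
    # Any 3 of the 5 (x, y) points determine the degree-2 polynomial, hence the secret.
    assert points_to_secret_int(points[:3], prime) == secret
    assert points_to_secret_int(points[2:], prime) == secret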
diff --git a/mnemonic/shamir.py b/mnemonic/shamir.py
new file mode 100644
index 0000000..18bf082
--- /dev/null
+++ b/mnemonic/shamir.py
@@ -0,0 +1,72 @@
+#
+# Copyright (c) 2015 Pavol Rusnak
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is furnished to do
+# so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+import sys
+import binascii
+from .secretsharing import secret_int_to_points, points_to_secret_int
+from .mnemonic import Mnemonic
+
+class Shamir(object):
+
+ def __init__(self, language):
+ self.mnemo = Mnemonic(language)
+ # see https://primes.utm.edu/lists/2small/ for biggest primes that fit into X bits
+ self.primes = {
+ 15: (2**120 - 119),
+ 19: (2**152 - 17),
+ 23: (2**184 - 33),
+ 27: (2**216 - 377),
+ 31: (2**248 - 237)
+ }
+
+ def split(self, data, m, n):
+ if not len(data) in self.primes.keys():
+ raise Exception('Unknown data length')
+ if m < 2 or m > 15:
+ raise Exception('Invalid M provided')
+ if n < 2 or n > 15:
+ raise Exception('Invalid N provided')
+ prime = self.primes[len(data)]
+ s = secret_int_to_points(int(binascii.hexlify(data), 16), m, n, prime)
+ s = ['%x%x%s' % (m, x[0], ('%x' % x[1]).zfill(len(data) * 2)) for x in s]
+ return [self.mnemo.to_mnemonic(binascii.unhexlify(x)) for x in s]
+
+ def combine(self, shares):
+ words = set([len(x.split(' ')) for x in shares])
+ if len(words) != 1:
+ raise Exception('Inconsistent number of words')
+ datalen = list(words)[0] * 4 // 3 - 1
+ shares = [binascii.hexlify(self.mnemo.to_entropy(x)) for x in shares]
+ if sys.version > '3':
+ if set([int(chr(x[0]), 16) for x in shares]) != set([len(shares)]):
+ raise Exception('Number of shares does not match the threshold')
+ points = [(int(chr(x[1]), 16), int(x[2:], 16)) for x in shares]
+ else:
+ if set([int(x[0], 16) for x in shares]) != set([len(shares)]):
+ raise Exception('Number of shares does not match the threshold')
+ points = [(int(x[1], 16), int(x[2:], 16)) for x in shares]
+ prime = self.primes[datalen]
+ r = points_to_secret_int(points, prime)
+ r = hex(r)[2:]
+ if r.endswith('L'):
+ r = r[:-1]
+ r = r.zfill(datalen * 2)
+ return binascii.unhexlify(r)
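[Editor's note] Shamir.split() above encodes each share as two hex nibbles (the threshold m and the share's x coordinate) followed by the zero-padded y value, then renders the bytes as a mnemonic; combine() reverses this and requires exactly m shares. A hedged usage sketch; the 'english' wordlist is an assumption, and 15 bytes is one of the secret lengths supported by the primes table above:

    # Sketch only; assumes an english wordlist. Secret length must be one of
    # 15, 19, 23, 27 or 31 bytes, per self.primes above.
    import os
    from mnemonic import Shamir

    shamir = Shamir('english')
    secret = os.urandom(15)
    shares = shamir.split(secret, m=3, n=5)      # five 12-word mnemonics
    assert shamir.combine(shares[:3]) == secret  # exactly m shares reconstruct it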
diff --git a/mnemonic/wordlist/japanese.txt b/mnemonic/wordlist/japanese.txt
index c4c9dca..fb8501a 100644
--- a/mnemonic/wordlist/japanese.txt
+++ b/mnemonic/wordlist/japanese.txt
@@ -1,19 +1,19 @@
あいこくしん
あいさつ
-あいだ
-あおぞら
+あいだ
+あおぞら
あかちゃん
あきる
-あけがた
+あけがた
あける
-あこがれる
+あこがれる
あさい
あさひ
あしあと
-あじわう
-あずかる
-あずき
-あそぶ
+あじわう
+あずかる
+あずき
+あそぶ
あたえる
あたためる
あたりまえ
@@ -26,44 +26,44 @@
あてな
あてはまる
あひる
-あぶら
-あぶる
+あぶら
+あぶる
あふれる
あまい
-あまど
+あまど
あまやかす
あまり
あみもの
あめりか
あやまる
あゆむ
-あらいぐま
+あらいぐま
あらし
-あらすじ
+あらすじ
あらためる
あらゆる
あらわす
-ありがとう
+ありがとう
あわせる
あわてる
あんい
-あんがい
+あんがい
あんこ
-あんぜん
+あんぜん
あんてい
あんない
あんまり
-いいだす
+いいだす
いおん
-いがい
-いがく
+いがい
+いがく
いきおい
いきなり
いきもの
いきる
-いくじ
-いくぶん
-いけばな
+いくじ
+いくぶん
+いけばな
いけん
いこう
いこく
@@ -71,28 +71,28 @@
いさましい
いさん
いしき
-いじゅう
-いじょう
-いじわる
-いずみ
-いずれ
+いじゅう
+いじょう
+いじわる
+いずみ
+いずれ
いせい
-いせえび
+いせえび
いせかい
いせき
-いぜん
+いぜん
いそうろう
-いそがしい
-いだい
-いだく
-いたずら
+いそがしい
+いだい
+いだく
+いたずら
いたみ
いたりあ
いちおう
-いちじ
-いちど
-いちば
-いちぶ
+いちじ
+いちど
+いちば
+いちぶ
いちりゅう
いつか
いっしゅん
@@ -101,10 +101,10 @@
いったん
いっち
いってい
-いっぽう
-いてざ
+いっぽう
+いてざ
いてん
-いどう
+いどう
いとこ
いない
いなか
@@ -112,9 +112,9 @@
いのち
いのる
いはつ
-いばる
+いばる
いはん
-いびき
+いびき
いひん
いふく
いへん
@@ -123,33 +123,33 @@
いもうと
いもたれ
いもり
-いやがる
+いやがる
いやす
いよかん
いよく
いらい
いらすと
-いりぐち
+いりぐち
いりょう
いれい
いれもの
いれる
-いろえんぴつ
+いろえんぴつ
いわい
いわう
いわかん
-いわば
+いわば
いわゆる
-いんげんまめ
+いんげんまめ
いんさつ
いんしょう
いんよう
うえき
うえる
-うおざ
-うがい
-うかぶ
-うかべる
+うおざ
+うがい
+うかぶ
+うかべる
うきわ
うくらいな
うくれれ
@@ -158,43 +158,43 @@
うけとる
うけもつ
うける
-うごかす
-うごく
+うごかす
+うごく
うこん
-うさぎ
+うさぎ
うしなう
-うしろがみ
+うしろがみ
うすい
-うすぎ
-うすぐらい
+うすぎ
+うすぐらい
うすめる
うせつ
うちあわせ
-うちがわ
+うちがわ
うちき
うちゅう
うっかり
うつくしい
うったえる
うつる
-うどん
-うなぎ
-うなじ
-うなずく
+うどん
+うなぎ
+うなじ
+うなずく
うなる
うねる
うのう
-うぶげ
-うぶごえ
+うぶげ
+うぶごえ
うまれる
うめる
うもう
うやまう
うよく
-うらがえす
-うらぐち
+うらがえす
+うらぐち
うらない
-うりあげ
+うりあげ
うりきれ
うるさい
うれしい
@@ -206,38 +206,38 @@
うんこう
うんちん
うんてん
-うんどう
+うんどう
えいえん
-えいが
+えいが
えいきょう
-えいご
+えいご
えいせい
-えいぶん
+えいぶん
えいよう
えいわ
えおり
-えがお
-えがく
+えがお
+えがく
えきたい
えくせる
えしゃく
えすて
えつらん
-えのぐ
+えのぐ
えほうまき
えほん
えまき
-えもじ
+えもじ
えもの
えらい
-えらぶ
+えらぶ
えりあ
えんえん
えんかい
-えんぎ
-えんげき
+えんぎ
+えんげき
えんしゅう
-えんぜつ
+えんぜつ
えんそく
えんちょう
えんとつ
@@ -247,27 +247,27 @@
おいつく
おうえん
おうさま
-おうじ
+おうじ
おうせつ
おうたい
おうふく
-おうべい
+おうべい
おうよう
おえる
おおい
おおう
-おおどおり
+おおどおり
おおや
おおよそ
おかえり
-おかず
-おがむ
+おかず
+おがむ
おかわり
-おぎなう
+おぎなう
おきる
おくさま
-おくじょう
-おくりがな
+おくじょう
+おくりがな
おくる
おくれる
おこす
@@ -278,105 +278,105 @@
おさめる
おしいれ
おしえる
-おじぎ
-おじさん
+おじぎ
+おじさん
おしゃれ
おそらく
おそわる
-おたがい
+おたがい
おたく
-おだやか
+おだやか
おちつく
おっと
おつり
-おでかけ
+おでかけ
おとしもの
おとなしい
-おどり
-おどろかす
-おばさん
+おどり
+おどろかす
+おばさん
おまいり
-おめでとう
-おもいで
+おめでとう
+おもいで
おもう
おもたい
おもちゃ
おやつ
-おやゆび
-およぼす
-おらんだ
+おやゆび
+およぼす
+おらんだ
おろす
-おんがく
+おんがく
おんけい
おんしゃ
おんせん
-おんだん
+おんだん
おんちゅう
-おんどけい
+おんどけい
かあつ
-かいが
-がいき
-がいけん
-がいこう
+かいが
+がいき
+がいけん
+がいこう
かいさつ
かいしゃ
かいすいよく
-かいぜん
-かいぞうど
+かいぜん
+かいぞうど
かいつう
かいてん
かいとう
かいふく
-がいへき
+がいへき
かいほう
かいよう
-がいらい
+がいらい
かいわ
かえる
かおり
かかえる
-かがく
-かがし
-かがみ
-かくご
... 2115 lines suppressed ...
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-mnemonic.git