[Pkg-fonts-devel] Bug#943761: python3-nototools: fails to install: Sorry: IndentationError: unexpected indent (lint_cmap_reqs.py, line 56)
peter green
plugwash at p10link.net
Wed Dec 11 02:06:44 GMT 2019
Tags 943761 +patch
Thanks
It seems upstream has already fixed this issue. I applied the upstream commit as a quilt patch, which resulted in a package that installed successfully. I also added the missing Breaks/Replaces.
Debdiff attached; no immediate intent to NMU.
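For context, the upstream commit routes the Python-2-only names through a small compatibility shim, nototools/py23.py, which the attached patch extends to cover basestring. A minimal sketch of that pattern (an illustration of the idea, not the exact upstream file):

    # Bind the Python 2 names on both interpreters so callers can do
    # "from nototools.py23 import unichr, unicode, basestring".
    try:
        unicode = unicode        # Python 2
    except NameError:
        unicode = str            # Python 3
    try:
        unichr = unichr          # Python 2
    except NameError:
        unichr = chr             # Python 3
    try:
        basestring = basestring  # Python 2
    except NameError:
        basestring = str         # Python 3

Modules touched by the patch then import these names from nototools.py23 and run unchanged on either interpreter.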
-------------- next part --------------
diff -Nru nototools-0.2.0/debian/changelog nototools-0.2.0/debian/changelog
--- nototools-0.2.0/debian/changelog 2019-10-21 11:04:18.000000000 +0000
+++ nototools-0.2.0/debian/changelog 2019-12-11 00:44:06.000000000 +0000
@@ -1,3 +1,12 @@
+nototools (0.2.0-1.1) UNRELEASED; urgency=medium
+
+ * Non-maintainer upload.
+ * Apply upstream commit c4a79c79f0da2e6385eab0d89bf5c6546b15e373
+ "More updates to make Python 3 compatible"
+ * Add breaks and replaces on python-nototools.
+
+ -- Peter Michael Green <plugwash at debian.org> Wed, 11 Dec 2019 00:44:06 +0000
+
nototools (0.2.0-1) experimental; urgency=medium
* New upstream release
diff -Nru nototools-0.2.0/debian/control nototools-0.2.0/debian/control
--- nototools-0.2.0/debian/control 2019-10-21 11:04:18.000000000 +0000
+++ nototools-0.2.0/debian/control 2019-12-11 00:44:06.000000000 +0000
@@ -20,6 +20,8 @@
Depends: ${misc:Depends},
${python3:Depends},
unicode-data
+Breaks: python-nototools
+Replaces: python-nototools
Description: font support tools from the Noto Fonts project
Noto is a collection of font families, each visually harmonized across
scripts.
diff -Nru nototools-0.2.0/debian/patches/more-python3-fixes.patch nototools-0.2.0/debian/patches/more-python3-fixes.patch
--- nototools-0.2.0/debian/patches/more-python3-fixes.patch 1970-01-01 00:00:00.000000000 +0000
+++ nototools-0.2.0/debian/patches/more-python3-fixes.patch 2019-12-11 00:43:51.000000000 +0000
@@ -0,0 +1,1866 @@
+commit c4a79c79f0da2e6385eab0d89bf5c6546b15e373
+Author: punchcutter <zarijoscha at gmail.com>
+Date: Wed Oct 23 21:51:41 2019 -0700
+
+ More updates to make Python 3 compatible
+ Added Wancho, Indic Siyaq Numbers, and Mayan Numerals to noto lint
+
+diff --git a/nototools/android_patches.py b/nototools/android_patches.py
+index 67ccc98..fa5f257 100755
+--- a/nototools/android_patches.py
++++ b/nototools/android_patches.py
+@@ -24,6 +24,7 @@ from os import path
+ import shutil
+ import tempfile
+
++from nototools.py23 import unichr
+ from nototools import subset
+ from nototools import coverage
+ from nototools import fix_khmer_and_lao_coverage as merger
+@@ -115,9 +116,9 @@ def _remove_cjk_emoji(cjk_font_names, srcdir, dstdir):
+
+ EMOJI = (
+ [0x26BD, 0x26BE, 0x1F18E]
+- + range(0x1F191, 0x1F19A+1)
++ + list(range(0x1F191, 0x1F19A+1))
+ + [0x1F201, 0x1F21A, 0x1F22F]
+- + range(0x1F232, 0x1F236+1)
++ + list(range(0x1F232, 0x1F236+1))
+ + [0x1F238, 0x1F239, 0x1F23A, 0x1F250, 0x1F251]
+ )
+
+diff --git a/nototools/autofix_for_phase3.py b/nototools/autofix_for_phase3.py
+index e50d709..0a083ee 100755
+--- a/nototools/autofix_for_phase3.py
++++ b/nototools/autofix_for_phase3.py
+@@ -27,7 +27,6 @@ hinted. We also put more info into the version string.
+
+ import argparse
+ import datetime
+-import glob
+ import os
+ from os import path
+ import re
+@@ -35,6 +34,7 @@ import subprocess
+
+ from fontTools import ttLib
+
++from nototools.py23 import basestring
+ from nototools import font_data
+ from nototools import noto_data
+ from nototools import noto_fonts
+@@ -249,7 +249,7 @@ def get_new_version(font, relfont, nversion):
+ return rversion
+ else:
+ n_mm, n_is_phase_2 = _version_str_to_mm(nversion)
+- if n_is_phase2:
++ if n_is_phase_2:
+ raise Exception('bad phase 3 minor version ("%s")' % nversion)
+ if rversion is not None:
+ if n_mm < r_mm:
+diff --git a/nototools/chart/chart.py b/nototools/chart/chart.py
+index 239735d..c0a2d41 100755
+--- a/nototools/chart/chart.py
++++ b/nototools/chart/chart.py
+@@ -76,7 +76,7 @@ num_rows = len(rows)
+ width = NUM_COLS * CELL_SIZE + 2 * (2 * MARGIN + LABEL_WIDTH)
+ height = num_rows * CELL_SIZE + 2 * MARGIN
+
+-print "Generating %s at %.3gx%.3gin" % (outfile, width/72., height/72.)
++print("Generating %s at %.3gx%.3gin" % (outfile, width/72., height/72.))
+ if outfile.endswith(".pdf"):
+ surface = cairo.PDFSurface(outfile, width, height)
+ elif outfile.endswith(".ps"):
+diff --git a/nototools/chart/pycairoft.py b/nototools/chart/pycairoft.py
+index e62a09e..6cb938a 100644
+--- a/nototools/chart/pycairoft.py
++++ b/nototools/chart/pycairoft.py
+@@ -34,7 +34,7 @@ def create_cairo_font_face_for_file (filename, faceindex=0, loadoptions=0):
+ # initialize freetype
+ _ft_lib = ctypes.c_void_p ()
+ if FT_Err_Ok != _freetype_so.FT_Init_FreeType (ctypes.byref (_ft_lib)):
+- raise "Error initialising FreeType library."
++ raise Exception("Error initialising FreeType library.")
+
+ _surface = cairo.ImageSurface (cairo.FORMAT_A8, 0, 0)
+
+diff --git a/nototools/cldr_data.py b/nototools/cldr_data.py
+index a5a0d45..38b6b8e 100755
+--- a/nototools/cldr_data.py
++++ b/nototools/cldr_data.py
+@@ -22,6 +22,7 @@ import re
+ from nototools import unicode_data
+ import xml.etree.cElementTree as ElementTree
+
++from nototools.py23 import unichr, unicode
+ from nototools import extra_locale_data
+
+ TOOLS_DIR = path.abspath(path.join(path.dirname(__file__), os.pardir))
+@@ -49,7 +50,7 @@ def _parse_likely_subtags():
+ from_tag = tag.get('from').replace('_', '-')
+ to_tag = tag.get('to').split('_')
+ _LIKELY_SUBTAGS[from_tag] = to_tag
+- # print 'likely subtag from %s -> %s' % (from_tag, to_tag)
++ # print('likely subtag from %s -> %s' % (from_tag, to_tag))
+
+ _LIKELY_SUBTAGS.update(extra_locale_data.LIKELY_SUBTAGS)
+
+@@ -161,7 +162,7 @@ def _parse_supplemental_data():
+
+ if _USE_EXTRA_LOCALE_DATA:
+ # Supplement lang to script mapping with extra locale data
+- for lang, scripts in extra_locale_data.LANG_TO_SCRIPTS.iteritems():
++ for lang, scripts in extra_locale_data.LANG_TO_SCRIPTS.items():
+ _LANG_TO_SCRIPTS[lang] |= set(scripts)
+
+ # Use extra locale data's likely subtag info to change the supplemental
+@@ -194,7 +195,7 @@ def _parse_supplemental_data():
+ _REGION_TO_LANG_SCRIPTS[region].add(lang_script)
+ _LANG_TO_REGIONS[lang].add(region)
+
+- for tup in extra_locale_data.REGION_TO_LANG_SCRIPTS.iteritems():
++ for tup in extra_locale_data.REGION_TO_LANG_SCRIPTS.items():
+ territory, lang_scripts = tup
+ _REGION_TO_LANG_SCRIPTS[territory] |= set(lang_scripts)
+ for lang_script in lang_scripts:
+@@ -695,7 +696,7 @@ def _init_lang_scr_to_lit_pops():
+
+ # make it a bit more useful by sorting the value list in order of decreasing
+ # population and converting the list to a tuple
+- for lang_scr, values in tmp_map.iteritems():
++ for lang_scr, values in tmp_map.items():
+ _lang_scr_to_lit_pops[lang_scr] = tuple(
+ sorted(values, key=lambda x: (-x[1], x[0])))
+
+diff --git a/nototools/cmap_block_coverage.py b/nototools/cmap_block_coverage.py
+index 214aae7..71fe0a1 100755
+--- a/nototools/cmap_block_coverage.py
++++ b/nototools/cmap_block_coverage.py
+@@ -196,7 +196,7 @@ def _summarize_block(block, block_count, defined_count, script_counts):
+
+ lower_limit = int(defined_count / 10)
+ groups = collections.defaultdict(list)
+- for script, count in script_counts.iteritems():
++ for script, count in script_counts.items():
+ groupnum = int(count / 5) * 5
+ if groupnum < lower_limit:
+ groupnum = 0
+diff --git a/nototools/collect_cldr_punct.py b/nototools/collect_cldr_punct.py
+index 87bf924..56c328f 100755
+--- a/nototools/collect_cldr_punct.py
++++ b/nototools/collect_cldr_punct.py
+@@ -27,6 +27,7 @@ from os import path
+ import sys
+ import xml.etree.cElementTree as ET
+
++from nototools.py23 import unichr
+ from nototools import cldr_data
+ from nototools import tool_utils
+ from nototools import unicode_data
+diff --git a/nototools/compare_cmap_data.py b/nototools/compare_cmap_data.py
+index 319b0eb..2eb0674 100755
+--- a/nototools/compare_cmap_data.py
++++ b/nototools/compare_cmap_data.py
+@@ -227,7 +227,7 @@ def report_compare(compare_result, detailed=True):
+ target_map = cmap_data.create_map_from_table(target_cmap_data.table)
+
+ inverted_target = collections.defaultdict(set)
+- for script, row in target_map.iteritems():
++ for script, row in target_map.items():
+ cps = tool_utils.parse_int_ranges(row.ranges)
+ for cp in cps:
+ inverted_target[cp].add(script)
+diff --git a/nototools/create_image.py b/nototools/create_image.py
+index 1125df1..8573bdf 100755
+--- a/nototools/create_image.py
++++ b/nototools/create_image.py
+@@ -26,11 +26,13 @@ from os import path
+ import string
+
+ from nototools import notoconfig
++from nototools.py23 import basestring
+
+ import cairo
+ import pango
+ import pangocairo
+
++
+ _fonts_conf_template = """<?xml version="1.0"?>
+ <!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+ <fontconfig>
+diff --git a/nototools/data/family_name_info_p3.xml b/nototools/data/family_name_info_p3.xml
+index 2a546ae..62e8959 100644
+--- a/nototools/data/family_name_info_p3.xml
++++ b/nototools/data/family_name_info_p3.xml
+@@ -100,6 +100,7 @@
+ <info family="sans-maka" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-mand" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-mani" family_name_style="very short" include_regular="t" use_preferred="t" />
++ <info family="sans-manu" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-marc" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-medf" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-mend" family_name_style="short" include_regular="t" use_preferred="t" />
+@@ -149,6 +150,7 @@
+ <info family="sans-sind" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-sinh" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-sinh-ui" family_name_style="very short" include_regular="t" use_preferred="t" />
++ <info family="sans-siyq" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-sogd" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-sogo" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-sora" family_name_style="short" include_regular="t" use_preferred="t" />
+@@ -180,6 +182,7 @@
+ <info family="sans-tirh" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-ugar" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-vaii" family_name_style="short" include_regular="t" use_preferred="t" />
++ <info family="sans-wcho" family_name_style="short" include_regular="t" use_preferred="t" />
+ <info family="sans-wara" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-xpeo" family_name_style="very short" include_regular="t" use_preferred="t" />
+ <info family="sans-xsux" family_name_style="very short" include_regular="t" use_preferred="t" />
+diff --git a/nototools/data/familyname_and_styles.txt b/nototools/data/familyname_and_styles.txt
+index 5c92410..e685f8c 100644
+--- a/nototools/data/familyname_and_styles.txt
++++ b/nototools/data/familyname_and_styles.txt
+@@ -13,6 +13,8 @@ NotoSerif
+ NotoSerifDisplay
+ -- TRBH --
+ NotoSansSymbols
++NotoSansIndicSiyaqNumbers
++NotoSansMayanNumerals
+ -- R --
+ NotoSansSymbols2
+ NotoSansMath
+@@ -114,6 +116,7 @@ NotoSansSiddham
+ NotoSansSignwriting
+ NotoSansTakri
+ NotoSansTirhuta
++NotoSansWancho
+ NotoSansWarangCiti
+ NotoSerifAhom
+ NotoSansAnatolianHieroglyphs
+diff --git a/nototools/data/noto_cmap_phase3.xml b/nototools/data/noto_cmap_phase3.xml
+index b7d9b3f..1c2330c 100644
+--- a/nototools/data/noto_cmap_phase3.xml
++++ b/nototools/data/noto_cmap_phase3.xml
+@@ -9,7 +9,7 @@
+ <th>script,name,count,ranges,xcount,xranges</th>
+ <tr>Adlm,Adlam,97,0000 000d 0020-0021 00a0 061f 0640 204f 25cc 2e41 1e900-1e94a 1e950-1e959 1e95e-1e95f,-1,</tr>
+ <tr>Aghb,Caucasian Albanian,76,0000 000d 0020 00a0 0304 0331 25cc fe20-fe2f 10530-10563 1056f,-1,</tr>
+- <tr>Ahom,Ahom,62,0000 000d 0020 00a0 25cc 11700-11719 1171d-1172b 11730-1173f,-1,</tr>
++ <tr>Ahom,Ahom,63,0000 000d 0020 00a0 25cc 11700-1171a 1171d-1172b 11730-1173f,-1,</tr>
+ <tr>Arab,Arabic,1160,0000 000d 0020-0021 002c-002e 0030-003a 003f 00a0 00ab 00bb 034f 0600-061c 061e-06ff 0750-077f 08a0-08b4 08b6-08bd 08d4-08ff 200b-2011 204f 25cc 2e41 fb50-fbc1 fbd3-fd3f fd50-fd8f fd92-fdc7 fdf0-fdfd fe70-fe74 fe76-fefc,11,0022 0027-0029 2018-2019 201c-201d 2026 2039-203a</tr>
+ <tr>Aran,(Urdu),281,0000 000d 0020-0021 0028-0029 002b-003a 003d 005b-005d 007b-007d 00a0 00ab 00b7 00bb 00d7 00f7 0600-0604 0609-0614 061b-061c 061e-0659 065d-0673 0679-0681 0683-068f 0691 0693 0696 0698-069a 069e 06a1 06a4 06a6 06a9 06ab 06af-06b1 06b3 06b7 06ba-06bc 06be 06c0-06c4 06c6-06c7 06ca 06cc-06cd 06d0 06d2-06d5 06dd-06de 06e0-06e1 06e9 06ee-06f9 06ff 0759 075c 0763 0767-077d 08ff 200b-2011 2013-2014 2018-2019 201c-201d 2039-203a 2212 25cc fbb2-fbc1 fd3e-fd3f fdf2 fdf4 fdfa-fdfd,0,</tr>
+ <tr>Armi,Imperial Aramaic,35,0000 000d 0020 00a0 10840-10855 10857-1085f,0,</tr>
+@@ -45,7 +45,7 @@
+ <tr>Gong,Gunjala Gondi,70,0000 000d 0020 00a0 0964-0965 25cc 11d60-11d65 11d67-11d68 11d6a-11d8e 11d90-11d91 11d93-11d98 11da0-11da9,-1,</tr>
+ <tr>Gonm,Masaram Gondi,82,0000 000d 0020 00a0 0964-0965 25cc 11d00-11d06 11d08-11d09 11d0b-11d36 11d3a 11d3c-11d3d 11d3f-11d47 11d50-11d59,-1,</tr>
+ <tr>Goth,Gothic,36,0000 000d 0020 00a0 0304-0305 0308 0331 25cc 10330-1034a,2,003a 00b7</tr>
+- <tr>Gran,Grantha,120,0000 000d 0020 00a0 0951-0952 0964-0965 0baa 0bb5 0be6-0bf2 1cd0 1cd2-1cd3 1cf2-1cf4 1cf8-1cf9 200c-200d 20f0 25cc 11300-11303 11305-1130c 1130f-11310 11313-11328 1132a-11330 11332-11333 11335-11339 1133c-11344 11347-11348 1134b-1134d 11350 11357 1135d-11363 11366-1136c 11370-11374,-1,</tr>
++ <tr>Gran,Grantha,121,0000 000d 0020 00a0 0951-0952 0964-0965 0baa 0bb5 0be6-0bf2 1cd0 1cd2-1cd3 1cf2-1cf4 1cf8-1cf9 200c-200d 20f0 25cc 11300-11303 11305-1130c 1130f-11310 11313-11328 1132a-11330 11332-11333 11335-11339 1133b-11344 11347-11348 1134b-1134d 11350 11357 1135d-11363 11366-1136c 11370-11374,-1,</tr>
+ <tr>Gujr,Gujarati,158,0000 000d 0020-0023 0025 0027-003f 005b-005f 007b-007e 00a0 00ad 00d7 00f7 0951-0952 0964-0965 0a81-0a83 0a85-0a8d 0a8f-0a91 0a93-0aa8 0aaa-0ab0 0ab2-0ab3 0ab5-0ab9 0abc-0ac5 0ac7-0ac9 0acb-0acd 0ad0 0ae0-0ae3 0ae6-0af1 0af9 200b-200d 2010 2013-2014 2018-2019 201c-201d 2026 20b9 2212 25cc a830-a839,-1,</tr>
+ <tr>Guru,Gurmukhi,153,0000 000d 0020-0023 0025 0027-003f 005b-005f 007b-007e 00a0 00ad 00d7 00f7 0951-0952 0964-0965 0a01-0a03 0a05-0a0a 0a0f-0a10 0a13-0a28 0a2a-0a30 0a32-0a33 0a35-0a36 0a38-0a39 0a3c 0a3e-0a42 0a47-0a48 0a4b-0a4d 0a51 0a59-0a5c 0a5e 0a66-0a75 200b-200d 2010 2013-2014 2018-2019 201c-201d 2026 20b9 2212 25cc 262c a830-a839,0,</tr>
+ <tr>Hano,Hanunoo,31,0000 000d 0020 00a0 1720-1736 200b-200d 25cc,10,00d7 2012-2015 2022 25fb-25fe</tr>
+@@ -78,6 +78,7 @@
+ <tr>Maka,Makasar,29,0000 000d 0020 25cc 11ee0-11ef8,-1,</tr>
+ <tr>Mand,Mandaic,37,0000 000d 0020 00a0 0640 0840-085b 085e 200c-200d 25cc,0,</tr>
+ <tr>Mani,Manichaean,59,0000 000d 0020 00a0 0640 200c-200d 25cc 10ac0-10ae6 10aeb-10af6,-1,</tr>
++ <tr>Manu,Mayan Numerals,25,0000 000d 0020 00a0 1d2e0-1d2f3,-1,</tr>
+ <tr>Marc,Marchen,73,0000 000d 0020 00a0 25cc 11c70-11c8f 11c92-11ca7 11ca9-11cb6,-1,</tr>
+ <tr>Medf,Medefaidrin,96,0000 000d 0020 00a0 25cc 16e40-16e9a,-1,</tr>
+ <tr>Mend,Mende Kikakui,218,0000 000d 0020 00a0 25cc 1e800-1e8c4 1e8c7-1e8d6,-1,</tr>
+@@ -123,6 +124,7 @@
+ <tr>Sidd,Siddham,99,0000 000d 0020 00a0 200c-200d 25cc 11580-115b5 115b8-115dd,-1,</tr>
+ <tr>Sind,Khudawadi,93,0000 000d 0020 002e 003a-003b 00a0 0964-0965 200c-200d 2013-2014 25cc a830-a839 112b0-112ea 112f0-112f9,-1,</tr>
+ <tr>Sinh,Sinhala,169,0000 000d 0020-0023 0025 0027-003f 005b-005f 007b-007e 00a0 00ad 00d7 00f7 0964-0965 0d82-0d83 0d85-0d96 0d9a-0db1 0db3-0dbb 0dbd 0dc0-0dc6 0dca 0dcf-0dd4 0dd6 0dd8-0ddf 0de6-0def 0df2-0df4 200b-200d 2013-2014 2018-2019 201c-201d 2026 2212 25cc 111e1-111f4,-1,</tr>
++ <tr>Siyq,Indic Siyaq Numbers,95,0000 000d 0020 00a0 0627 1ec71-1ecb4 0660-0669 06f0-06f9,-1,</tr>
+ <tr>Sogd,Sogdian,47,0000 000d 0020 00a0 25cc 10f30-10f59,-1,</tr>
+ <tr>Sogo,Old Sogdian,44,0000 000d 0020 00a0 10f00-10f27,-1,</tr>
+ <tr>Sora,Sora Sompeng,47,0000 000d 0020-0021 0028-0029 002c-002e 003b 00a0 2010 110d0-110e8 110f0-110f9,-1,</tr>
+@@ -146,6 +148,7 @@
+ <tr>Tirh,Tirhuta,101,0000 000d 0020 00a0 0964-0965 200c-200d 25cc a830-a839 11480-114c7 114d0-114d9,-1,</tr>
+ <tr>Ugar,Ugaritic,35,0000 000d 0020 00a0 10380-1039d 1039f,0,</tr>
+ <tr>Vaii,Vai,304,0000 000d 0020 00a0 a500-a62b,8,0022 0027 002c-002d 2018-2019 201c-201d</tr>
++ <tr>Wcho,Wancho,79,0000 000d 0020 0022 0027-0029 002c-002f 005b-005d 007b 007d 00a0 201c-201d 25cc 1e2c0-1e2f9 1e2ff,-1,</tr>
+ <tr>Wara,Warang Citi,101,0000 000d 0020-0021 0028-0029 002c-002e 003a-003b 003f 00a0 2013-2014 201c-201d 118a0-118f2 118ff,-1,</tr>
+ <tr>Xpeo,Old Persian,54,0000 000d 0020 00a0 103a0-103c3 103c8-103d5,0,</tr>
+ <tr>Xsux,Cuneiform,1238,0000 000d 0020 00a0 12000-12399 12400-1246e 12470-12474 12480-12543,0,</tr>
+diff --git a/nototools/extract_ohchr_attributions.py b/nototools/extract_ohchr_attributions.py
+index e094f09..fdd980d 100755
+--- a/nototools/extract_ohchr_attributions.py
++++ b/nototools/extract_ohchr_attributions.py
+@@ -69,12 +69,12 @@
+
+
+ import argparse
+-import codecs
+-import HTMLParser as html
++import html.parser as html
+ import re
+
+ from nototools import tool_utils
+
++
+ class ParseOhchr(html.HTMLParser):
+ def __init__(self, trace=False):
+ html.HTMLParser.__init__(self)
+diff --git a/nototools/font_data.py b/nototools/font_data.py
+index 489754f..e6d0cf5 100755
+--- a/nototools/font_data.py
++++ b/nototools/font_data.py
+@@ -18,6 +18,7 @@
+
+ __author__ = 'roozbeh at google.com (Roozbeh Pournader)'
+
++from nototools.py23 import unicode
+ from nototools import opentype_data
+
+ from fontTools.ttLib.tables._n_a_m_e import NameRecord
+@@ -219,7 +220,7 @@ def add_to_cmap(font, mapping):
+ cmap_table = font['cmap']
+ for table in cmap_table.tables:
+ if (table.format, table.platformID, table.platEncID) in UNICODE_CMAPS:
+- for code, glyph in mapping.iteritems():
++ for code, glyph in mapping.items():
+ table.cmap[code] = glyph
+
+
+diff --git a/nototools/generate_coverage_data.py b/nototools/generate_coverage_data.py
+index ab2b2d6..0244c08 100755
+--- a/nototools/generate_coverage_data.py
++++ b/nototools/generate_coverage_data.py
+@@ -61,7 +61,7 @@ def _create_metadata(**kwargs):
+ date = str(kwargs.pop('date', datetime.date.today()))
+ program = str(kwargs.pop('program', 'generate_coverage_data'))
+ arglist = [
+- (k, v) for k, v in sorted(kwargs.iteritems())
++ (k, v) for k, v in sorted(kwargs.items())
+ if v is not None]
+ return MetaData(date, program, arglist)
+
+diff --git a/nototools/generate_dingbats_html.py b/nototools/generate_dingbats_html.py
+index c2b80b2..8079932 100755
+--- a/nototools/generate_dingbats_html.py
++++ b/nototools/generate_dingbats_html.py
+@@ -30,6 +30,7 @@ from nototools import cmap_data
+ from nototools import font_data
+ from nototools import tool_utils
+ from nototools import unicode_data
++from nototools.py23 import basestring
+
+ """Generate html comparison of codepoints in various fonts."""
+
+@@ -869,7 +870,7 @@ def _flagged_name(cp, flag_sets):
+ except:
+ raise Exception('no name for %04X' % cp)
+ flags = []
+- for k, v in sorted(flag_sets.iteritems()):
++ for k, v in sorted(flag_sets.items()):
+ if (cp in v[0]) == v[1]:
+ flags.append(k)
+ if flags:
+diff --git a/nototools/generate_sample_from_exemplar.py b/nototools/generate_sample_from_exemplar.py
+index 83c2c20..293c5e4 100755
+--- a/nototools/generate_sample_from_exemplar.py
++++ b/nototools/generate_sample_from_exemplar.py
+@@ -26,6 +26,7 @@ import re
+ import shutil
+ import xml.etree.cElementTree as ElementTree
+
++from nototools.py23 import unichr
+ from nototools import cldr_data
+ from nototools import create_image
+ from nototools import extra_locale_data
+@@ -423,7 +424,7 @@ def generate_sample_for_script(script, loc_map):
+ num_locales = len(loc_map)
+
+ if num_locales == 1:
+- tag, info = loc_map.iteritems().next()
++ tag, info = loc_map.items().next()
+ exemplars = info[2]
+ ex_len = len(exemplars)
+ info = '%s (1 locale)\nfrom exemplars for %s (%s%d chars)' % (
+diff --git a/nototools/generate_sample_text.py b/nototools/generate_sample_text.py
+index a970df3..bc0a7d6 100755
+--- a/nototools/generate_sample_text.py
++++ b/nototools/generate_sample_text.py
+@@ -19,11 +19,14 @@
+ __author__ = 'roozbeh at google.com (Roozbeh Pournader)'
+
+ import sys
++from nototools.py23 import unichr
++
+
+ def char_rep_to_code(char_rep):
+ """Converts a character representation in hex to its code."""
+ return int(char_rep, 16)
+
++
+ def main(argv):
+ """Outputs a space-separated list of characters based on input ranges."""
+ chars = []
+@@ -36,7 +39,11 @@ def main(argv):
+ else:
+ chars.append(char_rep_to_code(arg))
+ chars = u' '.join([unichr(code) for code in chars])
+- print(chars.encode('UTF-8'))
++ if sys.version_info >= (2, 7):
++ print(chars)
++ else:
++ print(chars.encode('UTF-8'))
++
+
+ if __name__ == '__main__':
+ main(sys.argv)
+diff --git a/nototools/generate_sample_text_html.py b/nototools/generate_sample_text_html.py
+index ff86900..1f9168d 100755
+--- a/nototools/generate_sample_text_html.py
++++ b/nototools/generate_sample_text_html.py
+@@ -48,7 +48,7 @@ def generate_table(filename):
+ f.write('<table>\n')
+ f.write('<tr><th>Script<br/>BCP<th>name<th>type<th>text\n')
+
+- for script, samples in sorted(script_to_samples.iteritems()):
++ for script, samples in sorted(script_to_samples.items()):
+ script_en = cldr_data.get_english_script_name(script)
+ f.write('<tr><th colspan=4>%s\n' % script_en)
+ for bcp, sample_type, sample_text in samples:
+@@ -95,14 +95,14 @@ def _get_script_to_samples():
+ continue
+ script_to_samples[script].append((bcp, sample_type))
+
+- for script, samples in sorted(script_to_samples.iteritems()):
++ for script, samples in sorted(script_to_samples.items()):
+ pref = {}
+ for bcp, sample_type in samples:
+ if bcp not in pref or sample_type == 'udhr':
+ pref[bcp] = sample_type
+
+ full_samples = []
+- for bcp, sample_type in sorted(pref.iteritems()):
++ for bcp, sample_type in sorted(pref.items()):
+ filename = '%s_%s.txt' % (bcp, sample_type)
+ filepath = path.join(sample_dir, filename)
+ with codecs.open(filepath, 'r', 'utf-8') as f:
+diff --git a/nototools/generate_samples.py b/nototools/generate_samples.py
+index 5aa2eb8..9de9e58 100755
+--- a/nototools/generate_samples.py
++++ b/nototools/generate_samples.py
+@@ -18,6 +18,8 @@ import argparse
+ import codecs
+ import re
+
++from nototools.py23 import unichr, unicode
++
+ """Generate samples from a description file."""
+
+ USAGE = """
+@@ -104,8 +106,9 @@ sequences within an group.
+ # about working with non-bmp unicode on narrow builds.
+
+ # constants
+-_LEAD_OFFSET = 0xD800 - (0x10000 >> 10);
+-_SURROGATE_OFFSET = 0x10000 - (0xD800 << 10) - 0xDC00;
++_LEAD_OFFSET = 0xD800 - (0x10000 >> 10)
++_SURROGATE_OFFSET = 0x10000 - (0xD800 << 10) - 0xDC00
++
+
+ def cp_to_str(cp):
+ if cp < 0x10000:
+@@ -115,7 +118,7 @@ def cp_to_str(cp):
+
+ def surrogate_pair_to_cp(low, high):
+ # assumes low and high are proper surrogate values
+- return (low << 10) + high + _SURROGATE_OFFSET;
++ return (low << 10) + high + _SURROGATE_OFFSET
+
+
+ def prev_cp(ustr, index):
+diff --git a/nototools/generate_website_2_data.py b/nototools/generate_website_2_data.py
+index 9abd96f..886c114 100755
+--- a/nototools/generate_website_2_data.py
++++ b/nototools/generate_website_2_data.py
+@@ -37,6 +37,7 @@ import xml.etree.cElementTree as ElementTree
+
+ from fontTools import ttLib
+
++from nototools.py23 import unicode
+ from nototools import cldr_data
+ from nototools import coverage
+ from nototools import create_image
+@@ -74,7 +75,7 @@ Built on %s from the following noto repositor%s:
+
+ def check_families(family_map):
+ # ensure the count of fonts in a family is what we expect
+- for family_id, family in sorted(family_map.iteritems()):
++ for family_id, family in sorted(family_map.items()):
+ hinted_members = family.hinted_members
+ unhinted_members = family.unhinted_members
+
+@@ -178,7 +179,7 @@ def get_family_id_to_lang_scrs(lang_scrs, script_to_family_ids):
+ if not jpan_lang_scrs:
+ break;
+
+- for f, ls in sorted(family_id_to_lang_scrs.iteritems()):
++ for f, ls in sorted(family_id_to_lang_scrs.items()):
+ if not ls:
+ print('!family %s has no lang' % f)
+
+@@ -282,7 +283,7 @@ def get_family_id_to_regions(family_id_to_lang_scr_to_sample_key):
+
+ family_id_to_regions = collections.defaultdict(set)
+ warnings = set()
+- for tup in family_id_to_lang_scr_to_sample_key.iteritems():
++ for tup in family_id_to_lang_scr_to_sample_key.items():
+ family_id, lang_scr_to_sample_key = tup
+ for lang_scr in lang_scr_to_sample_key:
+ if lang_scr in lang_scr_to_regions:
+@@ -301,7 +302,7 @@ def get_family_id_to_regions(family_id_to_lang_scr_to_sample_key):
+
+ def get_region_to_family_ids(family_id_to_regions):
+ region_to_family_ids = collections.defaultdict(set)
+- for family_id, regions in family_id_to_regions.iteritems():
++ for family_id, regions in family_id_to_regions.items():
+ for region in regions:
+ region_to_family_ids[region].add(family_id)
+ return region_to_family_ids
+@@ -500,7 +501,7 @@ def get_family_id_to_default_lang_scr(family_id_to_lang_scrs, families):
+ """
+
+ family_id_to_default_lang_scr = {}
+- for family_id, lang_scrs in family_id_to_lang_scrs.iteritems():
++ for family_id, lang_scrs in family_id_to_lang_scrs.items():
+ script_key = families[family_id].rep_member.script
+ primary_script = noto_fonts.script_key_to_primary_script(script_key)
+
+@@ -758,7 +759,7 @@ class WebGen(object):
+
+ def build_zips(self, families):
+ zip_info = {}
+- for key, family_data in families.iteritems():
++ for key, family_data in families.items():
+ zip_info[key] = self.build_family_zips(key, family_data)
+ return zip_info
+
+@@ -821,7 +822,7 @@ class WebGen(object):
+
+ def build_css(self, families):
+ css_info = {}
+- for key, family_data in families.iteritems():
++ for key, family_data in families.items():
+ css_info[key] = self.build_family_css(key, family_data)
+ return css_info
+
+@@ -887,7 +888,7 @@ class WebGen(object):
+
+ # get inverse map from lang_scr to family_id
+ lang_scr_to_family_ids = collections.defaultdict(set)
+- for family_id, lang_scrs in family_id_to_lang_scr_to_sample_key.iteritems():
++ for family_id, lang_scrs in family_id_to_lang_scr_to_sample_key.items():
+ for lang_scr in lang_scrs:
+ lang_scr_to_family_ids[lang_scr].add(family_id)
+
+@@ -998,7 +999,7 @@ class WebGen(object):
+ family_id_to_regions, family_css_info,
+ lang_scr_sort_order):
+ for family_id, lang_scrs_map in sorted(
+- family_id_to_lang_scr_to_sample_key.iteritems()):
++ family_id_to_lang_scr_to_sample_key.items()):
+ family = families[family_id]
+ regions = family_id_to_regions[family_id]
+ css_info = family_css_info[family_id]
+@@ -1115,7 +1116,7 @@ class WebGen(object):
+ # name them based on the language. But most of the samples with the
+ # same font and text will be the same, because the fonts generally
+ # only customize for a few language tags. Sad!
+- for lang_scr, sample_key in sorted(lang_scr_to_sample_key.iteritems()):
++ for lang_scr, sample_key in sorted(lang_scr_to_sample_key.items()):
+ sample_text, attrib, _ = sample_key_to_info[sample_key]
+ self.build_family_images(
+ family, lang_scr, sample_text, attrib, sample_key)
+@@ -1228,7 +1229,7 @@ class WebGen(object):
+ if 'families' in self.debug:
+ print('\n#debug families')
+ print('%d found' % len(families))
+- for i, (family_id, family) in enumerate(sorted(families.iteritems())):
++ for i, (family_id, family) in enumerate(sorted(families.items())):
+ print('%2d] %s (%s, %s)' % (
+ i, family_id, family.name, noto_fonts.get_family_filename(family)))
+ if family.hinted_members:
+@@ -1243,7 +1244,7 @@ class WebGen(object):
+ print('\n#debug script to family ids')
+ print('%d found' % len(script_to_family_ids))
+ for i, (script, family_ids) in enumerate(
+- sorted(script_to_family_ids.iteritems())):
++ sorted(script_to_family_ids.items())):
+ print('%2d] %s: %s' % (i, script, ', '.join(sorted(family_ids))))
+
+ all_lang_scrs = set(['und-' + script for script in script_to_family_ids])
+@@ -1264,7 +1265,7 @@ class WebGen(object):
+ if 'lang_scr_to_sample_infos' in self.debug:
+ print('\n#debug lang+script to sample infos')
+ print('%d found' % len(lang_scr_to_sample_infos))
+- for lang_scr, info_list in sorted(lang_scr_to_sample_infos.iteritems()):
++ for lang_scr, info_list in sorted(lang_scr_to_sample_infos.items()):
+ for info in info_list:
+ print('%s: %s, %s, len %d' % (
+ lang_scr, info[2], info[1], len(info[0])))
+@@ -1275,7 +1276,7 @@ class WebGen(object):
+ print('\n#debug family id to list of lang+script')
+ print('%d found' % len(family_id_to_lang_scrs))
+ for i, (family_id, lang_scrs) in enumerate(
+- sorted(family_id_to_lang_scrs.iteritems())):
++ sorted(family_id_to_lang_scrs.items())):
+ print('%3d] %s: (%d) %s' % (
+ i, family_id, len(lang_scrs), ' '.join(sorted(lang_scrs))))
+
+@@ -1286,16 +1287,16 @@ class WebGen(object):
+ print('\n#debug family id to map from lang+script to sample key')
+ print('%d found' % len(family_id_to_lang_scr_to_sample_key))
+ for i, (family_id, lang_scr_to_sample_key) in enumerate(
+- sorted(family_id_to_lang_scr_to_sample_key.iteritems())):
++ sorted(family_id_to_lang_scr_to_sample_key.items())):
+ print('%2d] %s (%d):' % (i, family_id, len(lang_scr_to_sample_key)))
+ for j, (lang_scr, sample_key) in enumerate(
+- sorted(lang_scr_to_sample_key.iteritems())):
++ sorted(lang_scr_to_sample_key.items())):
+ print(' [%2d] %s: %s' % (j, lang_scr, sample_key))
+ if 'sample_key_to_info' in self.debug:
+ print('\n#debug sample key to sample info')
+ print('%d found' % len(sample_key_to_info))
+ for i, (sample_key, info) in enumerate(
+- sorted(sample_key_to_info.iteritems())):
++ sorted(sample_key_to_info.items())):
+ print('%2d] %s: %s, len %d' % (
+ i, sample_key, info[1], len(info[0])))
+
+@@ -1305,7 +1306,7 @@ class WebGen(object):
+ print('\n#debug family id to regions')
+ print('%d found' % len(family_id_to_regions))
+ for i, (family_id, regions) in enumerate(
+- sorted(family_id_to_regions.iteritems())):
++ sorted(family_id_to_regions.items())):
+ print('%2d] %s: (%d) %s' % (
+ i, family_id, len(regions), ', '.join(sorted(regions))))
+
+@@ -1314,7 +1315,7 @@ class WebGen(object):
+ print('\n#debug region to family ids')
+ print('%d found' % len(region_to_family_ids))
+ for i, (region, family_ids) in enumerate(
+- sorted(region_to_family_ids.iteritems())):
++ sorted(region_to_family_ids.items())):
+ print('%2d] %s: (%d) %s' % (
+ i, region, len(family_ids), ', '.join(sorted(family_ids))))
+
+@@ -1324,7 +1325,7 @@ class WebGen(object):
+ print('\n#debug family id to default lang scr')
+ print('%d found' % len(family_id_to_default_lang_scr))
+ for i, (family_id, lang_scr) in enumerate(
+- sorted(family_id_to_default_lang_scr.iteritems())):
++ sorted(family_id_to_default_lang_scr.items())):
+ print('%2d] %s: %s' % (i, family_id, lang_scr))
+
+ region_data = get_region_lat_lng_data(region_to_family_ids.keys())
+@@ -1458,7 +1459,7 @@ def get_repo_info(skip_checks):
+ message = '\n'.join(msg_lines)
+ repo_info[repo_name] = message
+
+- for rname, v in sorted(repo_info.iteritems()):
++ for rname, v in sorted(repo_info.items()):
+ print('--%s--\n%s' % (rname, v))
+ if errors:
+ raise Exception('Some repos are not clean\n' + '\n'.join(errors))
+diff --git a/nototools/generate_website_data.py b/nototools/generate_website_data.py
+index 57697c8..dc0efb8 100755
+--- a/nototools/generate_website_data.py
++++ b/nototools/generate_website_data.py
+@@ -36,6 +36,7 @@ import xml.etree.cElementTree as ElementTree
+
+ from fontTools import ttLib
+
++from nototools.py23 import unichr, unicode
+ from nototools import coverage
+ from nototools import create_image
+ from nototools import extra_locale_data
+diff --git a/nototools/grab_adobe_download.py b/nototools/grab_adobe_download.py
+index 6b8725e..11a0672 100755
+--- a/nototools/grab_adobe_download.py
++++ b/nototools/grab_adobe_download.py
+@@ -56,8 +56,9 @@ import shutil
+ import sys
+ import zipfile
+
+-import notoconfig
+-import grab_download
++from nototools import notoconfig
++from nototools import grab_download
++
+
+ def unzip_to_directory_tree(drop_dir, filepath):
+ skip_re = re.compile('.*/OTF-Fallback/.*')
+@@ -88,7 +89,7 @@ def unzip_to_directory_tree(drop_dir, filepath):
+ def main():
+ params = {
+ 'default_srcdir': os.path.expanduser('~/Downloads'),
+- 'default_dstdir': notoconfig.values.get('adobe_data'),
++ 'default_dstdir': notoconfig._values.get('adobe_data'),
+ 'default_regex': r'Noto_Sans_CJK-\d{4}-\d{2}-\d{2}\.zip'
+ }
+ grab_download.invoke_main(
+diff --git a/nototools/grab_mt_download.py b/nototools/grab_mt_download.py
+index a37481a..775bb0c 100755
+--- a/nototools/grab_mt_download.py
++++ b/nototools/grab_mt_download.py
+@@ -43,7 +43,7 @@ built by this tool.
+ __author__ = "dougfelt at google.com (Doug Felt)"
+
+ import argparse
+-import cStringIO
++from io import BytesIO
+ import os
+ import os.path
+ import re
+@@ -53,8 +53,9 @@ import zipfile
+
+ from fontTools import ttLib
+
+-import grab_download
+-import notoconfig
++from nototools import grab_download
++from nototools import notoconfig
++
+
+ def write_data_to_file(data, root, subdir, filename):
+ dstdir = os.path.join(root, subdir)
+@@ -107,7 +108,7 @@ def unzip_to_directory_tree(drop_dir, filepath):
+ # it in the same subdir the .ttf file went into.
+ # else we put it at drop_dir (no subdir).
+ if name.endswith('.ttf'):
+- blobfile = cStringIO.StringIO(data)
++ blobfile = BytesIO(data)
+ font = ttLib.TTFont(blobfile)
+ subdir = 'hinted' if font.get('fpgm') or font.get('prep') else 'unhinted'
+ write_data_to_file(data, drop_dir, subdir, name)
+@@ -137,7 +138,7 @@ def unzip_to_directory_tree(drop_dir, filepath):
+ def main():
+ params = {
+ 'default_srcdir': os.path.expanduser('~/Downloads'),
+- 'default_dstdir': notoconfig.values.get('monotype_data'),
++ 'default_dstdir': notoconfig._values.get('monotype_data'),
+ 'default_regex': r'Noto.*_\d{8}.zip',
+ }
+ grab_download.invoke_main(
+diff --git a/nototools/hb_input.py b/nototools/hb_input.py
+index 00e3815..ba8a924 100644
+--- a/nototools/hb_input.py
++++ b/nototools/hb_input.py
+@@ -15,7 +15,6 @@
+
+ from __future__ import division, print_function
+
+-from fontTools.ttLib import TTFont
+ from nototools import summary
+ from nototools.py23 import unichr
+
+diff --git a/nototools/lang_data.py b/nototools/lang_data.py
+index dbec17b..52b2e56 100755
+--- a/nototools/lang_data.py
++++ b/nototools/lang_data.py
+@@ -310,13 +310,13 @@ def main():
+ print()
+ print('lang_script to names')
+ lang_script_to_names = _get_lang_script_to_names()
+- for t in sorted(lang_script_to_names.iteritems()):
++ for t in sorted(lang_script_to_names.items()):
+ print('%s: %s' % t)
+
+ print()
+ print('script to default lang')
+ script_to_default_lang = _get_script_to_default_lang()
+- for t in sorted(script_to_default_lang.iteritems()):
++ for t in sorted(script_to_default_lang.items()):
+ print('%s: %s' % t)
+
+
+diff --git a/nototools/lint_cmap_reqs.py b/nototools/lint_cmap_reqs.py
+index a4488ce..bb3abbd 100755
+--- a/nototools/lint_cmap_reqs.py
++++ b/nototools/lint_cmap_reqs.py
+@@ -19,6 +19,7 @@
+ import argparse
+ import sys
+
++from nototools.py23 import unicode
+ from nototools import lint_config
+ from nototools import noto_data
+ from nototools import opentype_data
+@@ -53,8 +54,8 @@ def _symbol_set():
+
+ def _math_set():
+ """Returns set of characters that should be supported in Noto Math."""
+- ranges = unicode_data._parse_code_ranges(noto_data.MATH_RANGES_TXT)
+- return _code_range_to_set(ranges)
++ ranges = unicode_data._parse_code_ranges(noto_data.MATH_RANGES_TXT)
++ return _code_range_to_set(ranges)
+
+
+ def _cjk_set():
+@@ -214,7 +215,8 @@ def main():
+ sys.stderr.write('writing %s\n' % args.outfile)
+ cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
+ else:
+- print(cmap_data.write_cmap_data(cmapdata, pretty=True))
++ print(unicode(cmap_data.write_cmap_data(cmapdata, pretty=True), "utf-8"))
++
+
+ if __name__ == "__main__":
+ main()
+diff --git a/nototools/lint_config.py b/nototools/lint_config.py
+index a0cd8f6..d44bc36 100755
+--- a/nototools/lint_config.py
++++ b/nototools/lint_config.py
+@@ -30,6 +30,7 @@
+ import argparse
+ import re
+
++from nototools.py23 import basestring
+
+ spec_format = """
+ A spec defines a list of conditions to be run in sequence. A condition consists of
+@@ -308,7 +309,7 @@ class FontCondition(object):
+ fn = value[0]
+ val = value[1]
+ cond_name = None
+- for fn_text, fn_obj in FontCondition.fn_map.iteritems():
++ for fn_text, fn_obj in FontCondition.fn_map.items():
+ if fn == fn_obj:
+ cond_name = fn_text
+ break
+@@ -320,7 +321,7 @@ class FontCondition(object):
+ cond_value = str(val)
+ return '%s %s' % (cond_name, cond_value)
+
+- output = ['\n %s: %s' % (k,value_str(v)) for k,v in self.__dict__.iteritems() if v]
++ output = ['\n %s: %s' % (k,value_str(v)) for k,v in self.__dict__.items() if v]
+ return 'condition:%s' % ''.join(output)
+
+
+diff --git a/nototools/match_font_names.py b/nototools/match_font_names.py
+index 7be5cb5..0fc8c3f 100644
+--- a/nototools/match_font_names.py
++++ b/nototools/match_font_names.py
+@@ -13,6 +13,7 @@ import re
+
+ from nototools import tool_utils
+
++
+ def _build_regex(names):
+ parts = []
+ for name in names:
+@@ -71,15 +72,15 @@ def _collect_names(names):
+
+
+ def main():
+- parser = argparse.ArgumentParser();
++ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-f', '--files', help='list of names and/or files (prefixed with \'@\'',
+ metavar='name', required=True, nargs='+')
+ parser.add_argument(
+ '-s', '--src_dir', help='directory under which to search for files',
+ metavar='dir', required=True)
+- args = parser.parse_args();
+- _print_list(match_files(args.src_dir, _collect_names(args.files)));
++ args = parser.parse_args()
++ _print_list(match_files(args.src_dir, _collect_names(args.files)))
+
+
+ if __name__ == '__main__':
+diff --git a/nototools/merge_fonts.py b/nototools/merge_fonts.py
+index 8703c3c..0cb26c8 100755
+--- a/nototools/merge_fonts.py
++++ b/nototools/merge_fonts.py
+@@ -126,14 +126,14 @@ def build_valid_filenames(files=files, directory=directory):
+ for f in files:
+ valid_file = directory + '/' + f
+ if not os.path.isfile(valid_file):
+- log.warn('can not find %s, skipping it.' % valid_file)
++ log.warning('can not find %s, skipping it.' % valid_file)
+ else:
+ valid_files.append(valid_file)
+
+ if len(valid_files) == 0:
+ return valid_files
+ if os.path.basename(valid_files[0]) != files[0]:
+- log.warn('can not find the font %s to read line metrics from. Line '
++ log.warning('can not find the font %s to read line metrics from. Line '
+ + 'metrics in the result might be wrong.' % files[0])
+ return valid_files
+
+@@ -153,7 +153,7 @@ def main():
+
+ valid_files = build_valid_filenames(directory=args.directory)
+ if len(valid_files) <= 1:
+- log.warn('expecting at least two fonts to merge, but only got %d '
++ log.warning('expecting at least two fonts to merge, but only got %d '
+ + 'font(s).', len(valid_files))
+ sys.exit(-1)
+
+diff --git a/nototools/merge_noto.py b/nototools/merge_noto.py
+index 5ea6a38..c1b3bf5 100755
+--- a/nototools/merge_noto.py
++++ b/nototools/merge_noto.py
+@@ -17,6 +17,7 @@
+ """Merges Noto fonts."""
+ import os.path
+ import tempfile
++from nototools.py23 import unicode
+
+ from fontTools import merge
+ from fontTools import ttLib
+@@ -51,6 +52,7 @@ def has_gsub_table(fontfile):
+ font = ttLib.TTFont(fontfile)
+ return 'GSUB' in font
+
++
+ SCRIPT_TO_OPENTYPE_SCRIPT_TAG = {
+ 'CypriotSyllabary': 'cprt',
+ 'Deseret': 'dsrt',
+diff --git a/nototools/missing_coverage.py b/nototools/missing_coverage.py
+index e941fb5..65fc88d 100755
+--- a/nototools/missing_coverage.py
++++ b/nototools/missing_coverage.py
+@@ -60,7 +60,7 @@ def display_missing(cmap_file):
+
+
+ def main():
+- default_cmap_name = 'noto_cmap_phase3.xml'
++ default_cmap_name = 'data/noto_cmap_phase3.xml'
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+@@ -70,5 +70,6 @@ def main():
+
+ display_missing(args.filename)
+
++
+ if __name__ == '__main__':
+ main()
+diff --git a/nototools/mti_cmap_data.py b/nototools/mti_cmap_data.py
+index f29098c..9a1b0bd 100755
+--- a/nototools/mti_cmap_data.py
++++ b/nototools/mti_cmap_data.py
+@@ -101,8 +101,8 @@ def get_script_to_cmaps(csvdata):
+ except:
+ raise ValueError('error in col %d of row %d: "%s"' % (
+ i, n, v))
+- return { script: (cmap, xcmap)
+- for script, cmap, xcmap in zip(header, data, xdata) }
++ return {script: (cmap, xcmap)
++ for script, cmap, xcmap in zip(header, data, xdata)}
+
+
+ def cmap_data_from_csv(
+@@ -111,7 +111,7 @@ def cmap_data_from_csv(
+ metadata = cmap_data.create_metadata('mti_cmap_data', args)
+ script_to_cmaps = get_script_to_cmaps(csvdata)
+ if scripts or exclude_scripts:
+- script_list = script_to_cmap.keys()
++ script_list = script_to_cmaps.keys()
+ for script in script_list:
+ if scripts and script not in scripts:
+ del script_to_cmaps[script]
+diff --git a/nototools/noto_cmap_reqs.py b/nototools/noto_cmap_reqs.py
+index f35ecee..536ac60 100755
+--- a/nototools/noto_cmap_reqs.py
++++ b/nototools/noto_cmap_reqs.py
+@@ -42,6 +42,7 @@ import argparse
+ import collections
+ import sys
+
++from nototools.py23 import unichr
+ from nototools import cldr_data
+ from nototools import cmap_data
+ from nototools import compare_cmap_data
+@@ -59,7 +60,7 @@ _MERGED_SCRIPTS_BY_TARGET = {
+ def _invert_script_to_chars(script_to_chars):
+ """Convert script_to_chars to char_to_scripts and return."""
+ char_to_scripts = collections.defaultdict(set)
+- for script, cps in script_to_chars.iteritems():
++ for script, cps in script_to_chars.items():
+ for cp in cps:
+ char_to_scripts[cp].add(script)
+ return char_to_scripts
+@@ -321,14 +322,14 @@ def _build_block_to_primary_script():
+ num += 1
+ max_script = None
+ max_script_count = 0
+- for script, count in script_counts.iteritems():
++ for script, count in script_counts.items():
+ if count > max_script_count:
+ max_script = script
+ max_script_count = count
+ if num == 0:
+ max_script = 'EXCL' # exclude
+ elif float(max_script_count) / num < 0.8:
+- info = sorted(script_counts.iteritems(), key=lambda t: (-t[1], t[0]))
++ info = sorted(script_counts.items(), key=lambda t: (-t[1], t[0]))
+ block_info = '%s %s' % (block, ', '.join('%s/%d' % t for t in info))
+ if block in assigned_primaries:
+ max_script = assigned_primaries[block]
+@@ -425,7 +426,7 @@ def _unassign_latin(cmap_ops):
+
+ def _assign_cldr_punct(cmap_ops):
+ """Assigns cldr punctuation to scripts."""
+- for script, punct in collect_cldr_punct.script_to_punct().iteritems():
++ for script, punct in collect_cldr_punct.script_to_punct().items():
+ if script != 'CURRENCY':
+ cmap_ops.phase('assign cldr punct for ' + script)
+ cmap_ops.ensure_script(script)
+@@ -449,7 +450,7 @@ def _reassign_scripts(cmap_ops, scripts, new_script):
+
+ def _reassign_merged_scripts(cmap_ops):
+ """Reassign merged scripts."""
+- for target, scripts in sorted(_MERGED_SCRIPTS_BY_TARGET.iteritems()):
++ for target, scripts in sorted(_MERGED_SCRIPTS_BY_TARGET.items()):
+ cmap_ops.phase('reassign to ' + target)
+ _reassign_scripts(cmap_ops, scripts, target)
+
+@@ -615,7 +616,7 @@ def _remove_empty(cmap_ops):
+ """Remove any empty scripts (Braille should be one)."""
+ cmap_ops.phase('remove empty')
+ script_to_chars = cmap_ops.create_script_to_chars()
+- for script, chars in script_to_chars.iteritems():
++ for script, chars in script_to_chars.items():
+ if not chars:
+ cmap_ops.delete_script(script)
+
+@@ -2623,7 +2624,7 @@ def _regen_script_required():
+ for script, comment, data in _SCRIPT_REQUIRED
+ }
+ scripts = set(unicode_data.all_scripts())
+- for to_script, from_scripts in _MERGED_SCRIPTS_BY_TARGET.iteritems():
++ for to_script, from_scripts in _MERGED_SCRIPTS_BY_TARGET.items():
+ scripts.add(to_script)
+ scripts -= set(from_scripts)
+ # keep extra script data, e.g. 'Aran'
+@@ -2688,7 +2689,7 @@ def _assign_script_required(cmap_ops):
+ def _assign_script_special_chars(cmap_ops):
+ """Assign special characters listed in opentype_data."""
+ cmap_ops.phase('assign special chars')
+- for script, chars in opentype_data.SPECIAL_CHARACTERS_NEEDED.iteritems():
++ for script, chars in opentype_data.SPECIAL_CHARACTERS_NEEDED.items():
+ cmap_ops.add_all(frozenset(chars), script)
+
+
+@@ -2698,7 +2699,7 @@ def _assign_legacy_phase2(cmap_ops):
+ legacy_map = cmap_data.create_map_from_table(legacy_data.table)
+ legacy_script_to_chars = {
+ script: tool_utils.parse_int_ranges(row.ranges)
+- for script, row in legacy_map.iteritems()}
++ for script, row in legacy_map.items()}
+
+ # The default is to include all legacy characters, except for the chars
+ # listed for these scripts, for some default chars, and for some scripts.
+@@ -2774,7 +2775,7 @@ def _assign_bidi_mirroring(cmap_ops):
+ cmap_ops.phase('bidi mirroring')
+ script_to_chars = cmap_ops.create_script_to_chars()
+ mirrored = unicode_data.mirrored_chars()
+- for script, cps in sorted(script_to_chars.iteritems()):
++ for script, cps in sorted(script_to_chars.items()):
+ mirrored_in_script = cps & mirrored
+ if not mirrored_in_script:
+ continue
+@@ -3141,7 +3142,7 @@ def _assign_dotted_circle(cmap_ops):
+ # circle, but as using dotted circle is the convention used by Unicode in
+ # their code charts we'll require it for Arabic too.
+ script_to_chars = cmap_ops.create_script_to_chars()
+- for script, charset in sorted(script_to_chars.iteritems()):
++ for script, charset in sorted(script_to_chars.items()):
+ if script == 'EXCL':
+ continue
+ nsm = frozenset(cp for cp in charset if is_combining(cp))
+diff --git a/nototools/noto_data.py b/nototools/noto_data.py
+index 9f92ffc..37c7600 100755
+--- a/nototools/noto_data.py
++++ b/nototools/noto_data.py
+@@ -184,13 +184,13 @@ def _char_set(compact_set_text):
+ if sep_index == -1:
+ cp = int(part, base=16)
+ assert cp > prev
+- # print '%04x' % cp
++ # print('%04x' % cp)
+ result.add(cp)
+ prev = cp
+ else:
+ start = int(part[:sep_index], base=16)
+ end = int(part[sep_index + 2:], base=16)
+- # print '%04x..%04x' % (start, end)
++ # print('%04x..%04x' % (start, end))
+ assert start > prev
+ assert end > start
+ for cp in range(start, end + 1):
+diff --git a/nototools/noto_font_cmaps.py b/nototools/noto_font_cmaps.py
+index 1799dcb..cea16c4 100755
+--- a/nototools/noto_font_cmaps.py
++++ b/nototools/noto_font_cmaps.py
+@@ -18,21 +18,12 @@
+
+ import argparse
+ import collections
+-import datetime
+-import os
+-from os import path
+ import sys
+
+-from fontTools import ttLib
+-
+-from nototools import cldr_data
++from nototools.py23 import unicode
+ from nototools import cmap_data
+ from nototools import lint_config
+-from nototools import noto_data
+ from nototools import noto_fonts
+-from nototools import noto_lint
+-from nototools import opentype_data
+-from nototools import unicode_data
+
+
+ def report_set_differences(name_to_cpset, out=sys.stderr):
+@@ -45,30 +36,29 @@ def report_set_differences(name_to_cpset, out=sys.stderr):
+ while len(name_to_cpset):
+ common = None
+ if len(name_to_cpset) > 1:
+- for name, cpset in name_to_cpset.iteritems():
++ for name, cpset in name_to_cpset.items():
+ if common == None:
+ common = cpset.copy()
+ else:
+ common &= cpset
+ if common:
+ name = ', '.join(sorted(name_to_cpset))
+- print >> out, '%d%s in common among %s:' % (
+- len(common), additional, name)
+- print >> out, lint_config.write_int_ranges(common)
++ out.write('%d%s in common among %s:\n' % (len(common), additional, name))
++ out.write('%s\n' % lint_config.write_int_ranges(common))
+
+- for name, cpset in sorted(name_to_cpset.iteritems()):
++ for name, cpset in sorted(name_to_cpset.items()):
+ extra = cpset - common
+ if extra:
+ name_to_cpset[name] = extra
+ else:
+- print >> out, '%s has no additional' % name
++ out.write('%s has no additional\n' % name)
+ del name_to_cpset[name]
+ additional = ' additional'
+ continue
+
+- for name, cpset in sorted(name_to_cpset.iteritems()):
+- print >> out, '%s has %d%s:' % (name, len(cpset), additional)
+- print >> out, lint_config.write_int_ranges(cpset)
++ for name, cpset in sorted(name_to_cpset.items()):
++ out.write('%s has %d%s:\n' % (name, len(cpset), additional))
++ out.write('%s\n' % lint_config.write_int_ranges(cpset))
+ break
+
+
+@@ -97,10 +87,10 @@ def font_cmap_data(paths):
+ script_to_data[script].append(ScriptData(family_name, script, cpset))
+
+ def report_data_error(index, script_data):
+- print >> sys.stderr, ' %d: %s, %d, %s' % (
++ sys.stderr.write(' %d: %s, %d, %s\n' % (
+ index, script_data.family_name, script_data.script,
+ len(script_data.cpset),
+- lint_config.write_int_ranges(script_data.cpset))
++ lint_config.write_int_ranges(script_data.cpset)))
+
+ script_to_cmap = {}
+ for script in sorted(script_to_data):
+@@ -116,7 +106,7 @@ def font_cmap_data(paths):
+ if len(test_data.cpset) > len(selected_cpset):
+ selected_cpset = test_data.cpset
+ if differ:
+- print >> sys.stderr, '\nscript %s cmaps differ' % script
++ sys.stderr.write('\nscript %s cmaps differ\n' % script)
+ differences = {i.family_name: i.cpset for i in data}
+ report_set_differences(differences)
+ script_to_cmap[script] = selected_cpset
+@@ -143,7 +133,7 @@ def main():
+ if args.outfile:
+ cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
+ else:
+- print(cmap_data.write_cmap_data(cmapdata, pretty=True))
++ print(unicode(cmap_data.write_cmap_data(cmapdata, pretty=True), "utf-8"))
+
+
+ if __name__ == "__main__":
+diff --git a/nototools/noto_font_coverage.py b/nototools/noto_font_coverage.py
+index afe7c2d..37ecc7e 100755
+--- a/nototools/noto_font_coverage.py
++++ b/nototools/noto_font_coverage.py
+@@ -43,8 +43,8 @@ def codepoints(cp_list):
+ temp = low
+ low = high
+ high = temp
+- for cp in range(low, high + 1):
+- result.add(cp)
++ for cp2 in range(low, high + 1):
++ result.add(cp2)
+ else:
+ result.add(int(cp, 16))
+ return result
+@@ -129,7 +129,7 @@ def run(args, families):
+ else:
+ missing.add(cp)
+ if result:
+- for k, v in sorted(result.iteritems()):
++ for k, v in sorted(result.items()):
+ print(' %s: %s' % (k, to_ranges_str(v)))
+ if missing:
+ print(' not supported: %s' % to_ranges_str(missing))
+@@ -162,5 +162,6 @@ def main():
+ families = noto_fonts.get_families(fonts)
+ run(args, families)
+
++
+ if __name__ == '__main__':
+ main()
+diff --git a/nototools/noto_fonts.py b/nototools/noto_fonts.py
+index 8af1286..f2fc9bd 100644
+--- a/nototools/noto_fonts.py
++++ b/nototools/noto_fonts.py
+@@ -169,7 +169,7 @@ def get_noto_font(filepath, family_name='Arimo|Cousine|Tinos|Noto',
+ slope, fmt) = match.groups()
+ else:
+ if _EXT_REGEX.match(filename):
+- print >> sys.stderr, '%s did not match font regex' % filename
++ sys.stderr.write('%s did not match font regex\n' % filename)
+ return None
+
+ is_cjk = filedir.endswith('noto-cjk')
+@@ -194,7 +194,7 @@ def get_noto_font(filepath, family_name='Arimo|Cousine|Tinos|Noto',
+ is_mono = mono == 'Mono'
+
+ if width not in [None, '', 'Condensed', 'SemiCondensed', 'ExtraCondensed']:
+- print >> sys.stderr, 'noto_fonts: Unexpected width "%s"' % width
++ sys.stderr.write('noto_fonts: Unexpected width "%s"\n' % (width))
+ if width in ['SemiCond', 'Narrow']:
+ width = 'SemiCondensed'
+ elif width == 'Cond':
+@@ -223,7 +223,7 @@ def get_noto_font(filepath, family_name='Arimo|Cousine|Tinos|Noto',
+ try:
+ script = convert_to_four_letter(script)
+ except ValueError:
+- print >> sys.stderr, 'unknown script: %s for %s' % (script, filename)
++ sys.stderr.write('unknown script: %s for %s\n' % (script, filename))
+ return None
+
+ if not weight:
+@@ -448,11 +448,13 @@ def get_noto_fonts(paths=NOTO_FONT_PATHS):
+ """Scan paths for fonts, and create a NotoFont for each one, returning a list
+ of these. 'paths' defaults to the standard noto font paths, using notoconfig."""
+
+- font_dirs = filter(None, [tool_utils.resolve_path(p) for p in paths])
++ font_dirs = list(filter(None, [tool_utils.resolve_path(p) for p in paths]))
+ print('Getting fonts from: %s' % font_dirs)
+
+ all_fonts = []
+ for font_dir in font_dirs:
++ if not os.path.exists(font_dir):
++ continue
+ for filename in os.listdir(font_dir):
+ if not _EXT_REGEX.match(filename):
+ continue
+@@ -508,7 +510,7 @@ def get_families(fonts):
+ family_id = noto_font_to_family_id(font)
+ family_id_to_fonts[family_id].add(font)
+
+- for family_id, fonts in family_id_to_fonts.iteritems():
++ for family_id, fonts in family_id_to_fonts.items():
+ hinted_members = []
+ unhinted_members = []
+ rep_member = None
+diff --git a/nototools/noto_lint.py b/nototools/noto_lint.py
+index f668f30..7f6290a 100755
+--- a/nototools/noto_lint.py
++++ b/nototools/noto_lint.py
+@@ -53,6 +53,11 @@ from nototools import render
+ from nototools import tool_utils
+ from nototools import unicode_data
+
++try:
++ from future_builtins import filter
++except ImportError:
++ pass
++
+ # from wikipedia windows 1252 page. As of windows 98.
+ WIN_ANSI_CODEPOINTS = (
+ '0000-007f 00A0-00ff 20ac 201a 0192 201e 2026 2020 2021 02c6 2030 0160 2039 0152 017d'
+@@ -373,7 +378,7 @@ def _build_cmap_dict(filename):
+ data = cmap_data.read_cmap_data_file(filename)
+ script_to_rowdata = cmap_data.create_map_from_table(data.table)
+ return {script: frozenset(tool_utils.parse_int_ranges(rd.ranges))
+- for script, rd in script_to_rowdata.iteritems()}
++ for script, rd in script_to_rowdata.items()}
+
+
+ _phase_2_map = None
+@@ -733,7 +738,7 @@ def check_font(font_props, filename_error,
+
+ if char_filter:
+ # old_needed_size = len(needed_chars)
+- needed_chars = set(itertools.ifilter(char_filter[1].accept, needed_chars))
++ needed_chars = set(filter(char_filter[1].accept, needed_chars))
+ # TODO(dougfelt): figure out how to make this info available without messing up output
+ # print('filter needed char size: %d -> %d' % (old_needed_size, len(needed_chars))
+
+@@ -751,7 +756,7 @@ def check_font(font_props, filename_error,
+ return
+ unexpected_chars = set(cmap) - expected_chars
+ if char_filter and unexpected_chars:
+- unexpected_chars = set(itertools.ifilter(char_filter[1].accept, unexpected_chars))
++ unexpected_chars = set(filter(char_filter[1].accept, unexpected_chars))
+ if unexpected_chars:
+ warn("cmap/script_unexpected", "Chars",
+ "The following %d chars were not expected in the font: %s"
+@@ -863,7 +868,7 @@ def check_font(font_props, filename_error,
+
+ if tests.check('cmap/non_characters'):
+ non_characters = frozenset(
+- range(0xFDD0, 0xFDEF + 1)
++ list(range(0xFDD0, 0xFDEF + 1))
+ + [0xFFFE + plane_no * 0x10000 for plane_no in range(0, 17)]
+ + [0xFFFF + plane_no * 0x10000 for plane_no in range(0, 17)])
+ non_characters_in_cmap = non_characters & set(cmap.keys())
+@@ -1769,7 +1774,7 @@ def check_font(font_props, filename_error,
+ if tests.check('advances/whitespace'):
+ if font_props.is_mono:
+ space_width = get_horizontal_advance(space_char)
+- cps = [tab_char, nbsp_char] + range(0x2000, 0x200B)
++ cps = [tab_char, nbsp_char] + list(range(0x2000, 0x200B))
+ for cp in cps:
+ if cp in cmap:
+ expect_width(cp, space_width)
+@@ -1823,7 +1828,7 @@ def check_font(font_props, filename_error,
+ # FIXME: Add support for Arabic, Syriac, Mongolian, Phags-Pa,
+ # Devanagari, Bengali, etc
+ joins_to_right = set(range(0x1680, 0x169B + 1))
+- joins_to_left = set(range(0x1680, 0x169A + 1) + [0x169C])
++ joins_to_left = set(list(range(0x1680, 0x169A + 1)) + [0x169C])
+ all_joining = joins_to_right | joins_to_left
+
+ glyf_table = font['glyf']
+diff --git a/nototools/noto_names.py b/nototools/noto_names.py
+index 55c31d8..819d231 100755
+--- a/nototools/noto_names.py
++++ b/nototools/noto_names.py
+@@ -38,7 +38,6 @@ file names that follow noto conventions, and generates the corresponding
+ name table names. So it is not useful for non-noto fonts.
+ """
+
+-from __future__ import print_function
+ import argparse
+ import collections
+ import datetime
+@@ -218,7 +217,7 @@ def _preferred_parts(noto_font):
+ parts_pair = _preferred_cjk_parts(noto_font)
+ else:
+ parts_pair = _preferred_non_cjk_parts(noto_font)
+- return filter(None, parts_pair[0]), filter(None, parts_pair[1])
++ return list(filter(None, parts_pair[0])), list(filter(None, parts_pair[1]))
+
+
+ def _shift_parts(family_parts, subfamily_parts, stop_fn):
+@@ -426,7 +425,7 @@ def _postscript_name(preferred_family, preferred_subfamily, include_regular):
+ result = re.sub('CJK(JP|KR|SC|TC)', repl_fn, result)
+
+ if len(result) > 63:
+- print('postscript name longer than 63 characters:\n"%s"' % (result), file=sys.stderr)
++ sys.stderr.write('postscript name longer than 63 characters:\n"%s"\n' % (result))
+ return result
+
+
+@@ -587,7 +586,7 @@ def name_table_data(noto_font, family_to_name_info, phase):
+ try:
+ info = family_to_name_info[family_id]
+ except KeyError:
+- print('no family name info for "%s"' % family_id, file=sys.stderr)
++ sys.stderr.write('no family name info for "%s"\n' % family_id)
+ return None
+
+ family_parts, subfamily_parts = _wws_parts(*_preferred_parts(noto_font))
+@@ -596,9 +595,9 @@ def name_table_data(noto_font, family_to_name_info, phase):
+ ['Bold'],
+ ['Italic'],
+ ['Bold', 'Italic']]:
+- print('Error in family name info: %s requires preferred names, but info says none are required.'
+- % path.basename(noto_font.filepath), file=sys.stderr)
+- print(subfamily_parts, file=sys.stderr)
++ sys.stderr.write('Error in family name info: %s requires preferred names, but info says none are required.\n'
++ % path.basename(noto_font.filepath))
++ sys.stderr.write('%s\n' % subfamily_parts)
+ return None
+
+ # for phase 3 we'll now force include_regular
+@@ -707,13 +706,13 @@ def create_family_to_name_info(notofonts, phase, extra_styles):
+ continue
+ seen_ids.add(family_id)
+ preferred_family, _ = _preferred_parts(noto_font)
+- preferred_subfamily = filter(None, [
++ preferred_subfamily = list(filter(None, [
+ 'Mono' if noto_font.is_mono else None,
+ 'UI' if noto_font.is_UI else None,
+ 'Display' if noto_font.is_display else None,
+ 'ExtraCondensed', # longest width name
+- 'ExtraLight', # longest weight name
+- 'Italic']) # longest slope name
++ 'ExtraLight', # longest weight name
++ 'Italic'])) # longest slope name
+ _, subfamily_parts = _wws_parts(preferred_family, preferred_subfamily)
+ family_to_parts[family_id].update(subfamily_parts)
+ family_parts, _ = _original_parts(preferred_family, preferred_subfamily)
+@@ -723,7 +722,7 @@ def create_family_to_name_info(notofonts, phase, extra_styles):
+
+
+ result = {}
+- for family_id, part_set in family_to_parts.iteritems():
++ for family_id, part_set in family_to_parts.items():
+ # Even through CJK mono fonts are in their own families and have only
+ # bold and regular weights, they behave like they have more weights like
+ # the rest of CJK.
+@@ -860,7 +859,7 @@ def _dump_name_data(name_data):
+ if attr == 'original_family' and len(value) > ORIGINAL_FAMILY_LIMIT:
+ print('## family too long (%2d): %s' % (len(value), value))
+ err = True
+- print(' %20s: %s' % (attr, value))
++ print(' %20s: %s' % (attr, value))
+ else:
+ print(' %20s: <none>' % attr)
+ return err
+diff --git a/nototools/py23.py b/nototools/py23.py
+index 1a87b51..17e22e9 100644
+--- a/nototools/py23.py
++++ b/nototools/py23.py
+@@ -9,3 +9,7 @@ try:
+ except NameError:
+ unichr = chr
+
++try:
++ basestring = basestring
++except NameError:
++ basestring = str
+diff --git a/nototools/report_coverage_data.py b/nototools/report_coverage_data.py
+index ee243d2..cbaaabc 100755
+--- a/nototools/report_coverage_data.py
++++ b/nototools/report_coverage_data.py
+@@ -13,6 +13,7 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++from __future__ import print_function
+
+ import argparse
+ import codecs
+@@ -21,17 +22,14 @@ import math
+ from os import path
+ import sys
+
+-from nototools import cmap_data
+-from nototools import coverage
+ from nototools import generate_coverage_data
+ from nototools import tool_utils
+ from nototools import unicode_data
+
+-from fontTools import ttLib
+-
+ default_version=6.0
+ default_coverage_file = 'noto_cmap_phase2.xml'
+
++
+ def get_defined_cps(version=default_version, exclude_ranges=None):
+ defined_cps = unicode_data.defined_characters(version)
+ if exclude_ranges:
+@@ -143,7 +141,7 @@ def write_block_coverage_html(block_data, names, msg, out_file=sys.stdout):
+
+ def write_block_coverage_text(block_data, names, msg, out_file=sys.stdout):
+ block_data.sort()
+- print >> out_file, msg
++ print(msg, file=out_file)
+ name_len = max(len(t[2]) for t in block_data)
+ fmt_str = '%%%ds' % name_len
+ fmt_str = '%13s ' + fmt_str + ' %5s'
+@@ -153,7 +151,7 @@ def write_block_coverage_text(block_data, names, msg, out_file=sys.stdout):
+ header_parts.append(fmt_str % ('range', 'block name', 'count'))
+ for fmt, name in zip(header_fmts, names):
+ header_parts.append(fmt % name)
+- print >> out_file, ' '.join(header_parts)
++ print(' '.join(header_parts), file=out_file)
+ for start, end, name, block_cps, block_covered_cps_list in block_data:
+ line_parts = []
+ range_str = '%04x-%04x' % (start, end)
+@@ -164,7 +162,7 @@ def write_block_coverage_text(block_data, names, msg, out_file=sys.stdout):
+ pct = '%d%%' % int(100.0 * num_covered / num_in_block)
+ part_str = '%5d %4s' % (num_covered, pct)
+ line_parts.append(fmt % part_str)
+- print >> out_file, ' '.join(line_parts)
++ print(' '.join(line_parts), file=out_file)
+ out_file.flush()
+
+
+@@ -266,5 +264,6 @@ def main():
+ write_block_coverage(
+ block_data, names, args.message, args.format, args.output_file)
+
++
+ if __name__ == '__main__':
+ main()
+diff --git a/nototools/sample_with_font.py b/nototools/sample_with_font.py
+index b1df256..cb96e5d 100755
+--- a/nototools/sample_with_font.py
++++ b/nototools/sample_with_font.py
+@@ -21,14 +21,14 @@ Replicating the sample text they describe can be a bit tedious. This
+ lets you interactively search characters in the font by name to assemble
+ a string and save it to a file."""
+
++from builtins import input
+ import argparse
+ import codecs
+-import readline
+
++from nototools.py23 import unichr
+ from nototools import coverage
+ from nototools import unicode_data
+
+-from fontTools import ttLib
+
+ def _help():
+ print ('enter a string to match or one of the following:\n'
+@@ -44,7 +44,7 @@ def _build_text(name_map, initial_text=''):
+ text = initial_text
+ print('build text using map of length %d' % len(name_map))
+ while True:
+- line = raw_input('> ')
++ line = input('> ')
+ if not line:
+ continue
+ if line == 'quit':
+@@ -64,13 +64,13 @@ def _build_text(name_map, initial_text=''):
+ text = ''
+ continue
+ if line == 'write':
+- line = raw_input('file name> ')
++ line = input('file name> ')
+ if line:
+ _write_text(line, text)
+ continue
+
+ matches = []
+- for name, cp in sorted(name_map.iteritems()):
++ for name, cp in sorted(name_map.items()):
+ if line in name:
+ matches.append(name)
+ if not matches:
+@@ -104,7 +104,7 @@ def _build_text(name_map, initial_text=''):
+ print('multiple matches:\n ' + '\n '.join(
+ '[%2d] %s' % (i, n) for i, n in enumerate(matches)))
+ while True:
+- line = raw_input('0-%d or q to skip> ' % (len(matches) - 1))
++ line = input('0-%d or q to skip> ' % (len(matches) - 1))
+ if line == 'q':
+ select_multiple = False
+ break
+diff --git a/nototools/subset.py b/nototools/subset.py
+index cb9ac96..a9a86ef 100755
+--- a/nototools/subset.py
++++ b/nototools/subset.py
+@@ -22,7 +22,7 @@ import sys
+
+ from fontTools import subset
+
+-import coverage
++from nototools import coverage
+
+
+ def subset_font(source_file, target_file,
+@@ -59,7 +59,7 @@ def subset_font(source_file, target_file,
+ opt.drop_tables = ['+TTFA']
+
+ if options is not None:
+- for name, value in options.iteritems():
++ for name, value in options.items():
+ setattr(opt, name, value)
+
+ if include is not None:
+diff --git a/nototools/subset_symbols.py b/nototools/subset_symbols.py
+index af2ca10..ed64147 100755
+--- a/nototools/subset_symbols.py
++++ b/nototools/subset_symbols.py
+@@ -20,7 +20,7 @@ __author__ = 'roozbeh at google.com (Roozbeh Pournader)'
+
+ import sys
+
+-import subset
++from nototools import subset
+
+
+ def main(argv):
+diff --git a/nototools/swat_license.py b/nototools/swat_license.py
+index 5b5c078..2263a5d 100755
+--- a/nototools/swat_license.py
++++ b/nototools/swat_license.py
+@@ -362,7 +362,7 @@ def _construct_ttc_fonts(fonts, dst_root, dry_run):
+ basename = path.basename(font.filepath)
+ basename_to_fonts[basename].append(font)
+
+- for ttcfont, components in sorted(_ttc_fonts.iteritems()):
++ for ttcfont, components in sorted(_ttc_fonts.items()):
+ rel_filepath = _noto_relative_path(ttcfont.filepath)
+ print('-----\nBuilding %s' % rel_filepath)
+
+@@ -373,7 +373,7 @@ def _construct_ttc_fonts(fonts, dst_root, dry_run):
+ possible_components = basename_to_fonts.get(component)
+ if not possible_components:
+ print('! no match for component named %s in %s' % (
+- component, rel_path))
++ component, rel_filepath))
+ component_list = []
+ break
+
+@@ -383,7 +383,7 @@ def _construct_ttc_fonts(fonts, dst_root, dry_run):
+ if matched_possible_component:
+ print('! already matched possible component %s for %s' % (
+ matched_possible_component.filename,
+- possible_component_filename))
++ possible_component.filename))
+ matched_possible_component = None
+ break
+ matched_possible_component = possible_component
+@@ -393,7 +393,7 @@ def _construct_ttc_fonts(fonts, dst_root, dry_run):
+ break
+ component_list.append(matched_possible_component)
+ if not component_list:
+- print('! cannot generate ttc font %s' % rel_path)
++ print('! cannot generate ttc font %s' % rel_filepath)
+ continue
+
+ print('components:\n ' + '\n '.join(
+diff --git a/nototools/test_vertical_extents.py b/nototools/test_vertical_extents.py
+index 22e2a6b..27288e0 100755
+--- a/nototools/test_vertical_extents.py
++++ b/nototools/test_vertical_extents.py
+@@ -37,8 +37,8 @@ import re
+ import sys
+ import xml.etree.ElementTree
+
+-import coverage
+-
++from nototools.py23 import unichr, unicode
++from nototools import coverage
+ from nototools import font_caching
+ from nototools import render
+
+diff --git a/nototools/tool_utils.py b/nototools/tool_utils.py
+index fe351cc..81a2be1 100644
+--- a/nototools/tool_utils.py
++++ b/nototools/tool_utils.py
+@@ -166,7 +166,7 @@ def generate_zip_with_7za_from_filepairs(pairs, archive_path):
+ if source_root not in pair_map:
+ pair_map[source_root] = set()
+ pair_map[source_root].add(dest)
+- for source_root, dest_set in pair_map.iteritems():
++ for source_root, dest_set in pair_map.items():
+ generate_zip_with_7za(source_root, sorted(dest_set), archive_path)
+
+
+@@ -339,7 +339,7 @@ def git_head_commit(repo):
+ text = subprocess.check_output(
+ ['git', 'show', '-s', '--date=format:%Y-%m-%d %H:%M:%S',
+ '--no-expand-tabs', '--pretty=format:%H\t%cd\t%s', 'HEAD'])
+- return tuple(text.strip().split('\t', 2))
++ return tuple(text.strip().split(b'\t', 2))
+
+
+ def git_check_remote_commit(repo, commit, remote='upstream', branch='master'):
+diff --git a/nototools/ttc_utils.py b/nototools/ttc_utils.py
+index 37a4453..753fcc4 100755
+--- a/nototools/ttc_utils.py
++++ b/nototools/ttc_utils.py
+@@ -25,6 +25,7 @@ import subprocess
+
+ from fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e as NameTable
+
++from nototools.py23 import unicode
+ from nototools import tool_utils
+
+ _ttcHeader = '>4sLL'
+diff --git a/nototools/unicode_data.py b/nototools/unicode_data.py
+index e3a9d66..e91b861 100755
+--- a/nototools/unicode_data.py
++++ b/nototools/unicode_data.py
+@@ -32,9 +32,8 @@ import collections
+ import os
+ from os import path
+ import re
+-import sys
+
+-from nototools.py23 import unichr, unicode
++from nototools.py23 import unichr, unicode, basestring
+ try:
+ import unicodedata2 as unicodedata # Unicode 8 compliant native lib
+ except ImportError:
+@@ -1051,7 +1050,7 @@ def _load_emoji_group_data():
+ group_list.extend(_read_emoji_test_data(_SUPPLEMENTAL_EMOJI_GROUP_DATA))
+ for i, (seq, group, subgroup, name) in enumerate(group_list):
+ if seq in _emoji_group_data:
+- print('seq %s alredy in group data as %s' % (seq_to_string(seq), _emoji_group_data[seq]))
++ print('seq %s already in group data as %s' % (seq_to_string(seq), _emoji_group_data[seq]))
+ print(' new value would be %s' % str((i, group, subgroup, name)))
+ _emoji_group_data[seq] = (i, group, subgroup, name)
+
+diff --git a/nototools/unittests/font_tests.py b/nototools/unittests/font_tests.py
+index 8ee0835..ac6a9a8 100644
+--- a/nototools/unittests/font_tests.py
++++ b/nototools/unittests/font_tests.py
+@@ -684,7 +684,7 @@ class TestGlyphAreas(unittest.TestCase):
+ errors = []
+ for other in glyph_sets[1:]:
+ other_pen = GlyphAreaPen(other)
+- for name, area in areas.iteritems():
++ for name, area in areas.items():
+ if name in self.whitelist:
+ continue
+ other[name].draw(other_pen)
+diff --git a/nototools/update_alpha.py b/nototools/update_alpha.py
+index c6155bd..153ae23 100755
+--- a/nototools/update_alpha.py
++++ b/nototools/update_alpha.py
+@@ -34,8 +34,8 @@ import shutil
+ import subprocess
+ import sys
+
+-import notoconfig
+-import compare_summary
++from nototools import notoconfig
++from nototools import compare_summary
+
+ class RedirectStdout(object):
+ """Redirect stdout to file."""
+@@ -112,7 +112,7 @@ def push_to_noto_alpha(alphadir, srcdir, dry_run):
+ new_label = 'h/u'
+ if new_label:
+ name_info[root_name] = new_label
+- names = ', '.join(sorted(['%s(%s)' % (k, v) for k, v in name_info.iteritems()]))
++ names = ', '.join(sorted(['%s(%s)' % (k, v) for k, v in name_info.items()]))
+
+ # get date of the drop from srcdir
+ result = re.search(r'\d{4}_\d{2}_\d{2}', srcdir)
+diff --git a/nototools/update_cldr.py b/nototools/update_cldr.py
+index 261b07d..a5a4d47 100755
+--- a/nototools/update_cldr.py
++++ b/nototools/update_cldr.py
+@@ -24,8 +24,8 @@ import shutil
+ import string
+ import subprocess
+
+-import notoconfig
+-import tool_utils
++from nototools import notoconfig
++from nototools import tool_utils
+
+ CLDR_SUBDIRS = [
+ 'common/main',
+diff --git a/nototools/update_udhr_samples.py b/nototools/update_udhr_samples.py
+index 5ebaf7a..698952a 100755
+--- a/nototools/update_udhr_samples.py
++++ b/nototools/update_udhr_samples.py
+@@ -24,17 +24,21 @@ import codecs
+ import collections
+ import datetime
+ import difflib
+-import generate_website_data
+ import os
+ import re
+ import shutil
+-import unicode_data
+-import urllib
++
++try:
++ from urllib.request import urlretrieve
++except:
++ from urllib import urlretrieve
++
+ import xml.etree.ElementTree as ET
+-import zipfile
+
+-from nototools import cldr_data
++from nototools.py23 import unicode
++from nototools import generate_website_data
+ from nototools import tool_utils
++from nototools import unicode_data
+
+ DIR_URL = 'http://unicode.org/udhr/d'
+ UDHR_XML_ZIP_NAME = 'udhr_xml.zip'
+@@ -44,7 +48,7 @@ def fetch_udhr(fetch_dir):
+ """Fetch UDHR xml bundle from unicode.org to fetch_dir."""
+ fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)
+ dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
+- result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)
++ result = urlretrieve(UDHR_XML_ZIP_URL, dstfile)
+ print('Fetched: ' + result[0])
+
+
+@@ -324,7 +328,7 @@ def add_likely_scripts(bcp_to_code):
+ """Add script subtags where they are not present in the bcp code. If
+ we don't know the script"""
+ result= {}
+- for bcp, code in bcp_to_code.iteritems():
++ for bcp, code in bcp_to_code.items():
+ if code in CODE_TO_BCP:
+ new_bcp = CODE_TO_BCP[code]
+ else:
+@@ -361,7 +365,7 @@ EXCLUDE_CODES = frozenset([
+
+ def filter_bcp_to_code(bcp_to_code):
+ """Exclude entries for samples improved in noto/sample_texts and for bad samples."""
+- return {k: v for k, v in bcp_to_code.iteritems()
++ return {k: v for k, v in bcp_to_code.items()
+ if k not in EXCLUDE_BCP and v not in EXCLUDE_CODES}
+
+
+@@ -481,7 +485,7 @@ def get_bcp_to_code_attrib_sample(src_dir, ohchr_dir):
+ def print_bcp_to_code_attrib_sample(bcp_to_code_attrib_sample):
+ print('index size: %s' % len(bcp_to_code_attrib_sample))
+ for bcp, (code, attrib, sample) in sorted(
+- bcp_to_code_attrib_sample.iteritems()):
++ bcp_to_code_attrib_sample.items()):
+ print('%s: %s, %s\n "%s"' % (bcp, code, attrib, sample))
+
+
+@@ -656,7 +660,7 @@ def update_samples(
+ sample_attrib_list = []
+ sample_dir = tool_utils.ensure_dir_exists(sample_dir)
+ count = 0
+- for bcp, (code, attrib, sample) in bcp_to_code_attrib_sample.iteritems():
++ for bcp, (code, attrib, sample) in bcp_to_code_attrib_sample.items():
+ dst_file = '%s_udhr.txt' % bcp
+ dst_path = os.path.join(sample_dir, dst_file)
+ if in_repo and os.path.isfile(dst_path) and dst_file not in tool_samples:
+@@ -817,7 +821,7 @@ def compare_samples(base_dir, trg_dir, trg_to_base_name=lambda x: x, opts=None):
+ with codecs.open(trg_path, 'r', 'utf8') as f:
+ trg_text = f.read()
+ if not base_text:
+- print('base text (%s) is empty' % k)
++ print('base text (%s) is empty' % base_path)
+ continue
+ if not trg_text:
+ print('target text is empty: %s' % trg_path)
+@@ -954,7 +958,7 @@ def main():
+ in_repo, args.no_stage)
+
+ if args.mapping:
+- print_bcp_to_code_attrib(bcp_to_code_attrib)
++ print_bcp_to_code_attrib_sample(bcp_to_code_attrib_sample)
+
+ if args.base_sample_dir:
+ compare_samples(
diff -Nru nototools-0.2.0/debian/patches/series nototools-0.2.0/debian/patches/series
--- nototools-0.2.0/debian/patches/series 2019-10-21 11:04:18.000000000 +0000
+++ nototools-0.2.0/debian/patches/series 2019-12-11 00:44:06.000000000 +0000
@@ -1 +1,2 @@
test_vertical_extensions-Correctly-use-local-imports.patch
+more-python3-fixes.patch