diff --git a/Lib/gftools/actions/checkgooglefonts.py b/Lib/gftools/actions/checkgooglefonts.py
index 5acd3447..811114cd 100644
--- a/Lib/gftools/actions/checkgooglefonts.py
+++ b/Lib/gftools/actions/checkgooglefonts.py
@@ -6,7 +6,9 @@
 
 
 if __name__ == "__main__":
-    config = yaml.load(open(os.path.join("sources", "config.yaml")), Loader=yaml.FullLoader)
+    config = yaml.load(
+        open(os.path.join("sources", "config.yaml")), Loader=yaml.FullLoader
+    )
     if "googleFonts" in config and config["googleFonts"]:
         print("This font should be submitted to Google Fonts")
         print(f"::set-output name=is_gf::true")
diff --git a/Lib/gftools/actions/checkversionbump.py b/Lib/gftools/actions/checkversionbump.py
index 10746bd4..d3eb86ce 100644
--- a/Lib/gftools/actions/checkversionbump.py
+++ b/Lib/gftools/actions/checkversionbump.py
@@ -64,7 +64,9 @@ def version_has_ever_changed(file, version):
 
 
 if __name__ == "__main__":
-    config = yaml.load(open(os.path.join("sources", "config.yaml")), Loader=yaml.FullLoader)
+    config = yaml.load(
+        open(os.path.join("sources", "config.yaml")), Loader=yaml.FullLoader
+    )
     sources = config["sources"]
 
     current_version = None
diff --git a/Lib/gftools/actions/getlatestversion.py b/Lib/gftools/actions/getlatestversion.py
index ea9c112d..d869c3e5 100644
--- a/Lib/gftools/actions/getlatestversion.py
+++ b/Lib/gftools/actions/getlatestversion.py
@@ -4,14 +4,19 @@
 from github import Github
 import re
 
+
 def get_latest_release(family, user=None, repo=None):
     if not (user and repo):
-        repo_url = subprocess.check_output(["git", "remote", "get-url", "origin"]).decode("utf8").strip()
+        repo_url = (
+            subprocess.check_output(["git", "remote", "get-url", "origin"])
+            .decode("utf8")
+            .strip()
+        )
         url_split = repo_url.split("/")
         user, repo = url_split[3], url_split[4]
 
     g = Github(os.environ["GITHUB_TOKEN"])
-    repo = g.get_repo(user + '/' + repo)
+    repo = g.get_repo(user + "/" + repo)
     for release in repo.get_releases():
         if release.draft:
             continue
@@ -27,16 +32,19 @@ def get_latest_release(family, user=None, repo=None):
         return version, download_url
     return None, None
 
+
 if __name__ == "__main__":
     import argparse
-    parser = argparse.ArgumentParser(description="Return the URL of a font's latest release artefact")
-    parser.add_argument('--user', help='the repository username', default="notofonts")
-    parser.add_argument('--repo', help='the repository name')
-    parser.add_argument('family', help='the font family name')
+
+    parser = argparse.ArgumentParser(
+        description="Return the URL of a font's latest release artefact"
+    )
+    parser.add_argument("--user", help="the repository username", default="notofonts")
+    parser.add_argument("--repo", help="the repository name")
+    parser.add_argument("family", help="the font family name")
     args = parser.parse_args()
 
     version, download_url = get_latest_release(args.family, args.user, args.repo)
     if version and download_url:
         print(f"::set-output name=version::{version}")
         print(f"::set-output name=url::{download_url}")
-
diff --git a/Lib/gftools/actions/qa2issue.py b/Lib/gftools/actions/qa2issue.py
index 86d4e7d6..0cc2ff6e 100644
--- a/Lib/gftools/actions/qa2issue.py
+++ b/Lib/gftools/actions/qa2issue.py
@@ -14,14 +14,22 @@
 from gftools.gfgithub import GitHubClient
 
 if __name__ == "__main__":
-    url_split = subprocess.check_output(["git", "remote", "get-url", "origin"]).decode("utf8").strip().split("/")
+    url_split = (
+        subprocess.check_output(["git", "remote", "get-url", "origin"])
+        .decode("utf8")
+        .strip()
+        .split("/")
+    )
     client = GitHubClient(url_split[3], url_split[4])
 
-
parser = argparse.ArgumentParser(description='Create or update github issue') - parser.add_argument('--template', help='the issue name', - default="Fontbakery QA Report for Version {}") - parser.add_argument('version', help='the proposed version') - parser.add_argument('file', help='file containing MarkDown content') + parser = argparse.ArgumentParser(description="Create or update github issue") + parser.add_argument( + "--template", + help="the issue name", + default="Fontbakery QA Report for Version {}", + ) + parser.add_argument("version", help="the proposed version") + parser.add_argument("file", help="file containing MarkDown content") args = parser.parse_args() label = f"qa-{args.version}" @@ -45,4 +53,6 @@ client._post(client.rest_url(f"issues/{number}/labels"), {"labels": [label]}) see_url = response["html_url"] - print(f"::error file=sources/config.yaml,title=Fontbakery check failed::See {see_url}") + print( + f"::error file=sources/config.yaml,title=Fontbakery check failed::See {see_url}" + ) diff --git a/Lib/gftools/axes_pb2.py b/Lib/gftools/axes_pb2.py index 2e201c7e..c0ff0537 100644 --- a/Lib/gftools/axes_pb2.py +++ b/Lib/gftools/axes_pb2.py @@ -7,38 +7,45 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\naxes.proto"\xcc\x01\n\tAxisProto\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x07 \x01(\t\x12\x11\n\tmin_value\x18\x02 \x01(\x02\x12\x15\n\rdefault_value\x18\x03 \x01(\x02\x12\x11\n\tmax_value\x18\x04 \x01(\x02\x12\x11\n\tprecision\x18\x05 \x01(\x05\x12 \n\x08\x66\x61llback\x18\x06 \x03(\x0b\x32\x0e.FallbackProto\x12\x13\n\x0b\x64\x65scription\x18\x08 \x01(\t\x12\x15\n\rfallback_only\x18\t \x01(\x08"B\n\rFallbackProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t' +) -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\naxes.proto\"\xcc\x01\n\tAxisProto\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x07 \x01(\t\x12\x11\n\tmin_value\x18\x02 \x01(\x02\x12\x15\n\rdefault_value\x18\x03 \x01(\x02\x12\x11\n\tmax_value\x18\x04 \x01(\x02\x12\x11\n\tprecision\x18\x05 \x01(\x05\x12 \n\x08\x66\x61llback\x18\x06 \x03(\x0b\x32\x0e.FallbackProto\x12\x13\n\x0b\x64\x65scription\x18\x08 \x01(\t\x12\x15\n\rfallback_only\x18\t \x01(\x08\"B\n\rFallbackProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t') - - - -_AXISPROTO = DESCRIPTOR.message_types_by_name['AxisProto'] -_FALLBACKPROTO = DESCRIPTOR.message_types_by_name['FallbackProto'] -AxisProto = _reflection.GeneratedProtocolMessageType('AxisProto', (_message.Message,), { - 'DESCRIPTOR' : _AXISPROTO, - '__module__' : 'axes_pb2' - # @@protoc_insertion_point(class_scope:AxisProto) - }) +_AXISPROTO = DESCRIPTOR.message_types_by_name["AxisProto"] +_FALLBACKPROTO = DESCRIPTOR.message_types_by_name["FallbackProto"] +AxisProto = _reflection.GeneratedProtocolMessageType( + "AxisProto", + (_message.Message,), + { + "DESCRIPTOR": _AXISPROTO, + "__module__": "axes_pb2" + # @@protoc_insertion_point(class_scope:AxisProto) + }, +) _sym_db.RegisterMessage(AxisProto) -FallbackProto = _reflection.GeneratedProtocolMessageType('FallbackProto', (_message.Message,), { - 'DESCRIPTOR' : _FALLBACKPROTO, - '__module__' : 
'axes_pb2' - # @@protoc_insertion_point(class_scope:FallbackProto) - }) +FallbackProto = _reflection.GeneratedProtocolMessageType( + "FallbackProto", + (_message.Message,), + { + "DESCRIPTOR": _FALLBACKPROTO, + "__module__": "axes_pb2" + # @@protoc_insertion_point(class_scope:FallbackProto) + }, +) _sym_db.RegisterMessage(FallbackProto) if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - _AXISPROTO._serialized_start=15 - _AXISPROTO._serialized_end=219 - _FALLBACKPROTO._serialized_start=221 - _FALLBACKPROTO._serialized_end=287 + DESCRIPTOR._options = None + _AXISPROTO._serialized_start = 15 + _AXISPROTO._serialized_end = 219 + _FALLBACKPROTO._serialized_start = 221 + _FALLBACKPROTO._serialized_end = 287 # @@protoc_insertion_point(module_scope) diff --git a/Lib/gftools/builder/recipeproviders/__init__.py b/Lib/gftools/builder/recipeproviders/__init__.py index ad7d2d50..e86900cc 100644 --- a/Lib/gftools/builder/recipeproviders/__init__.py +++ b/Lib/gftools/builder/recipeproviders/__init__.py @@ -26,7 +26,6 @@ def sources(self) -> List[File]: return [get_file(str(p)) for p in self.config["sources"]] - def get_provider(provider: str): # First try gftools.builder.recipeproviders.X try: diff --git a/Lib/gftools/constants.py b/Lib/gftools/constants.py index b439209d..0552316e 100644 --- a/Lib/gftools/constants.py +++ b/Lib/gftools/constants.py @@ -45,30 +45,30 @@ NAMEID_DARK_BACKGROUD_PALETTE = 24 NAMEID_STR = { - NAMEID_COPYRIGHT_NOTICE: "COPYRIGHT_NOTICE", - NAMEID_FONT_FAMILY_NAME: "FONT_FAMILY_NAME", - NAMEID_FONT_SUBFAMILY_NAME: "FONT_SUBFAMILY_NAME", - NAMEID_UNIQUE_FONT_IDENTIFIER: "UNIQUE_FONT_IDENTIFIER", - NAMEID_FULL_FONT_NAME: "FULL_FONT_NAME", - NAMEID_VERSION_STRING: "VERSION_STRING", - NAMEID_POSTSCRIPT_NAME: "POSTSCRIPT_NAME", - NAMEID_TRADEMARK: "TRADEMARK", - NAMEID_MANUFACTURER_NAME: "MANUFACTURER_NAME", - NAMEID_DESIGNER: "DESIGNER", - NAMEID_DESCRIPTION: "DESCRIPTION", - NAMEID_VENDOR_URL: "VENDOR_URL", - NAMEID_DESIGNER_URL: "DESIGNER_URL", - NAMEID_LICENSE_DESCRIPTION: "LICENSE_DESCRIPTION", - NAMEID_LICENSE_INFO_URL: "LICENSE_INFO_URL", - NAMEID_TYPOGRAPHIC_FAMILY_NAME: "TYPOGRAPHIC_FAMILY_NAME", - NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME: "TYPOGRAPHIC_SUBFAMILY_NAME", - NAMEID_COMPATIBLE_FULL_MACONLY: "COMPATIBLE_FULL_MACONLY", - NAMEID_SAMPLE_TEXT: "SAMPLE_TEXT", - NAMEID_POSTSCRIPT_CID_NAME: "POSTSCRIPT_CID_NAME", - NAMEID_WWS_FAMILY_NAME: "WWS_FAMILY_NAME", - NAMEID_WWS_SUBFAMILY_NAME: "WWS_SUBFAMILY_NAME", - NAMEID_LIGHT_BACKGROUND_PALETTE: "LIGHT_BACKGROUND_PALETTE", - NAMEID_DARK_BACKGROUD_PALETTE: "DARK_BACKGROUD_PALETTE" + NAMEID_COPYRIGHT_NOTICE: "COPYRIGHT_NOTICE", + NAMEID_FONT_FAMILY_NAME: "FONT_FAMILY_NAME", + NAMEID_FONT_SUBFAMILY_NAME: "FONT_SUBFAMILY_NAME", + NAMEID_UNIQUE_FONT_IDENTIFIER: "UNIQUE_FONT_IDENTIFIER", + NAMEID_FULL_FONT_NAME: "FULL_FONT_NAME", + NAMEID_VERSION_STRING: "VERSION_STRING", + NAMEID_POSTSCRIPT_NAME: "POSTSCRIPT_NAME", + NAMEID_TRADEMARK: "TRADEMARK", + NAMEID_MANUFACTURER_NAME: "MANUFACTURER_NAME", + NAMEID_DESIGNER: "DESIGNER", + NAMEID_DESCRIPTION: "DESCRIPTION", + NAMEID_VENDOR_URL: "VENDOR_URL", + NAMEID_DESIGNER_URL: "DESIGNER_URL", + NAMEID_LICENSE_DESCRIPTION: "LICENSE_DESCRIPTION", + NAMEID_LICENSE_INFO_URL: "LICENSE_INFO_URL", + NAMEID_TYPOGRAPHIC_FAMILY_NAME: "TYPOGRAPHIC_FAMILY_NAME", + NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME: "TYPOGRAPHIC_SUBFAMILY_NAME", + NAMEID_COMPATIBLE_FULL_MACONLY: "COMPATIBLE_FULL_MACONLY", + NAMEID_SAMPLE_TEXT: "SAMPLE_TEXT", + NAMEID_POSTSCRIPT_CID_NAME: "POSTSCRIPT_CID_NAME", + 
NAMEID_WWS_FAMILY_NAME: "WWS_FAMILY_NAME", + NAMEID_WWS_SUBFAMILY_NAME: "WWS_SUBFAMILY_NAME", + NAMEID_LIGHT_BACKGROUND_PALETTE: "LIGHT_BACKGROUND_PALETTE", + NAMEID_DARK_BACKGROUD_PALETTE: "DARK_BACKGROUD_PALETTE", } # Platform IDs: @@ -79,17 +79,17 @@ PLATFORM_ID__CUSTOM = 4 PLATID_STR = { - PLATFORM_ID__UNICODE: "UNICODE", - PLATFORM_ID__MACINTOSH: "MACINTOSH", - PLATFORM_ID__ISO: "ISO", - PLATFORM_ID__WINDOWS: "WINDOWS", - PLATFORM_ID__CUSTOM: "CUSTOM" + PLATFORM_ID__UNICODE: "UNICODE", + PLATFORM_ID__MACINTOSH: "MACINTOSH", + PLATFORM_ID__ISO: "ISO", + PLATFORM_ID__WINDOWS: "WINDOWS", + PLATFORM_ID__CUSTOM: "CUSTOM", } OFL_LICENSE_INFO = ( - "This Font Software is licensed under the SIL Open Font License, " - "Version 1.1. This license is available with a FAQ at: " - "https://openfontlicense.org" + "This Font Software is licensed under the SIL Open Font License, " + "Version 1.1. This license is available with a FAQ at: " + "https://openfontlicense.org" ) OFL_LICENSE_URL = "https://openfontlicense.org" @@ -184,4 +184,4 @@ INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE.""" \ No newline at end of file +OTHER DEALINGS IN THE FONT SOFTWARE.""" diff --git a/Lib/gftools/designers_pb2.py b/Lib/gftools/designers_pb2.py index 23f5c23a..ee28d88d 100644 --- a/Lib/gftools/designers_pb2.py +++ b/Lib/gftools/designers_pb2.py @@ -2,121 +2,167 @@ # source: designers.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='designers.proto', - package='', - syntax='proto2', - serialized_options=None, - serialized_pb=_b('\n\x0f\x64\x65signers.proto\"Q\n\x11\x44\x65signerInfoProto\x12\x10\n\x08\x64\x65signer\x18\x01 \x01(\t\x12\x0c\n\x04link\x18\x02 \x01(\t\x12\x1c\n\x06\x61vatar\x18\x03 \x01(\x0b\x32\x0c.AvatarProto\" \n\x0b\x41vatarProto\x12\x11\n\tfile_name\x18\x01 \x01(\t') + name="designers.proto", + package="", + syntax="proto2", + serialized_options=None, + serialized_pb=_b( + '\n\x0f\x64\x65signers.proto"Q\n\x11\x44\x65signerInfoProto\x12\x10\n\x08\x64\x65signer\x18\x01 \x01(\t\x12\x0c\n\x04link\x18\x02 \x01(\t\x12\x1c\n\x06\x61vatar\x18\x03 \x01(\x0b\x32\x0c.AvatarProto" \n\x0b\x41vatarProto\x12\x11\n\tfile_name\x18\x01 \x01(\t' + ), ) - - _DESIGNERINFOPROTO = _descriptor.Descriptor( - name='DesignerInfoProto', - full_name='DesignerInfoProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='designer', full_name='DesignerInfoProto.designer', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='link', full_name='DesignerInfoProto.link', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='avatar', full_name='DesignerInfoProto.avatar', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=19, - serialized_end=100, + name="DesignerInfoProto", + full_name="DesignerInfoProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="designer", + full_name="DesignerInfoProto.designer", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="link", + full_name="DesignerInfoProto.link", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="avatar", + full_name="DesignerInfoProto.avatar", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=19, + serialized_end=100, ) _AVATARPROTO = _descriptor.Descriptor( - name='AvatarProto', - full_name='AvatarProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='file_name', full_name='AvatarProto.file_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=102, - serialized_end=134, + name="AvatarProto", + full_name="AvatarProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="file_name", + full_name="AvatarProto.file_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + 
oneofs=[], + serialized_start=102, + serialized_end=134, ) -_DESIGNERINFOPROTO.fields_by_name['avatar'].message_type = _AVATARPROTO -DESCRIPTOR.message_types_by_name['DesignerInfoProto'] = _DESIGNERINFOPROTO -DESCRIPTOR.message_types_by_name['AvatarProto'] = _AVATARPROTO +_DESIGNERINFOPROTO.fields_by_name["avatar"].message_type = _AVATARPROTO +DESCRIPTOR.message_types_by_name["DesignerInfoProto"] = _DESIGNERINFOPROTO +DESCRIPTOR.message_types_by_name["AvatarProto"] = _AVATARPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) -DesignerInfoProto = _reflection.GeneratedProtocolMessageType('DesignerInfoProto', (_message.Message,), dict( - DESCRIPTOR = _DESIGNERINFOPROTO, - __module__ = 'designers_pb2' - # @@protoc_insertion_point(class_scope:DesignerInfoProto) - )) +DesignerInfoProto = _reflection.GeneratedProtocolMessageType( + "DesignerInfoProto", + (_message.Message,), + dict( + DESCRIPTOR=_DESIGNERINFOPROTO, + __module__="designers_pb2" + # @@protoc_insertion_point(class_scope:DesignerInfoProto) + ), +) _sym_db.RegisterMessage(DesignerInfoProto) -AvatarProto = _reflection.GeneratedProtocolMessageType('AvatarProto', (_message.Message,), dict( - DESCRIPTOR = _AVATARPROTO, - __module__ = 'designers_pb2' - # @@protoc_insertion_point(class_scope:AvatarProto) - )) +AvatarProto = _reflection.GeneratedProtocolMessageType( + "AvatarProto", + (_message.Message,), + dict( + DESCRIPTOR=_AVATARPROTO, + __module__="designers_pb2" + # @@protoc_insertion_point(class_scope:AvatarProto) + ), +) _sym_db.RegisterMessage(AvatarProto) diff --git a/Lib/gftools/fix.py b/Lib/gftools/fix.py index 56b5101b..c2dfe878 100644 --- a/Lib/gftools/fix.py +++ b/Lib/gftools/fix.py @@ -1001,7 +1001,6 @@ def fix(self): class GaspFixer(FontFixer): - def fix(self, value=15): try: table = self.font.get("gasp") diff --git a/Lib/gftools/fonts_public_pb2.py b/Lib/gftools/fonts_public_pb2.py index f95ae856..b9ef051a 100644 --- a/Lib/gftools/fonts_public_pb2.py +++ b/Lib/gftools/fonts_public_pb2.py @@ -6,47 +6,47 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x66onts_public.proto\x12\x13google.fonts_public\"\xc4\x07\n\x0b\x46\x61milyProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x10\n\x08\x64\x65signer\x18\x02 \x02(\t\x12\x0f\n\x07license\x18\x03 \x02(\t\x12\x10\n\x08\x63\x61tegory\x18\x04 \x03(\t\x12\x12\n\ndate_added\x18\x05 \x02(\t\x12-\n\x05\x66onts\x18\x06 \x03(\x0b\x32\x1e.google.fonts_public.FontProto\x12\x0f\n\x07\x61liases\x18\x07 \x03(\t\x12\x0f\n\x07subsets\x18\x08 \x03(\t\x12\x19\n\x11ttf_autohint_args\x18\t \x01(\t\x12\x33\n\x04\x61xes\x18\n \x03(\x0b\x32%.google.fonts_public.AxisSegmentProto\x12\x62\n\x1aregistry_default_overrides\x18\x0b \x03(\x0b\x32>.google.fonts_public.FamilyProto.RegistryDefaultOverridesEntry\x12\x30\n\x06source\x18\x0c \x01(\x0b\x32 .google.fonts_public.SourceProto\x12\x0f\n\x07is_noto\x18\r \x01(\x08\x12\x11\n\tlanguages\x18\x0e \x03(\t\x12;\n\tfallbacks\x18\x0f \x03(\x0b\x32(.google.fonts_public.FamilyFallbackProto\x12I\n\rsample_glyphs\x18\x10 \x03(\x0b\x32\x32.google.fonts_public.FamilyProto.SampleGlyphsEntry\x12\x39\n\x0bsample_text\x18\x11 \x01(\x0b\x32$.google.fonts_public.SampleTextProto\x12\x14\n\x0c\x64isplay_name\x18\x12 \x01(\t\x12\x43\n\x15ordered_sample_glyphs\x18\x13 
\x03(\x0b\x32$.google.fonts_public.GlyphGroupProto\x12\x14\n\x0cminisite_url\x18\x14 \x01(\t\x12\x16\n\x0eprimary_script\x18\x15 \x01(\t\x12\x18\n\x10primary_language\x18\x16 \x01(\t\x12\x0e\n\x06stroke\x18\x17 \x01(\t\x12\x17\n\x0f\x63lassifications\x18\x19 \x03(\t\x1a?\n\x1dRegistryDefaultOverridesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\x33\n\x11SampleGlyphsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8a\x01\n\tFontProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05style\x18\x02 \x02(\t\x12\x0e\n\x06weight\x18\x03 \x02(\x05\x12\x10\n\x08\x66ilename\x18\x04 \x02(\t\x12\x18\n\x10post_script_name\x18\x05 \x02(\t\x12\x11\n\tfull_name\x18\x06 \x02(\t\x12\x11\n\tcopyright\x18\x07 \x01(\t\"Z\n\x10\x41xisSegmentProto\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x11\n\tmin_value\x18\x02 \x01(\x02\x12\x11\n\tmax_value\x18\x04 \x01(\x02J\x04\x08\x03\x10\x04R\rdefault_value\"\x8f\x01\n\x0bSourceProto\x12\x16\n\x0erepository_url\x18\x01 \x01(\t\x12\x0e\n\x06\x62ranch\x18\x05 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\t\x12\x13\n\x0b\x61rchive_url\x18\x03 \x01(\t\x12\x33\n\x05\x66iles\x18\x04 \x03(\x0b\x32$.google.fonts_public.SourceFileProto\"9\n\x0fSourceFileProto\x12\x13\n\x0bsource_file\x18\x01 \x01(\t\x12\x11\n\tdest_file\x18\x02 \x01(\t\"H\n\x0bTargetProto\x12\x39\n\x0btarget_type\x18\x01 \x01(\x0e\x32$.google.fonts_public.TargetTypeProto\"\xcc\x01\n\x13\x46\x61milyFallbackProto\x12:\n\x0b\x61xis_target\x18\x01 \x03(\x0b\x32%.google.fonts_public.AxisSegmentProto\x12\x30\n\x06target\x18\x02 \x03(\x0b\x32 .google.fonts_public.TargetProto\x12\x17\n\x0fsize_adjust_pct\x18\x03 \x01(\x02\x12\x1b\n\x13\x61scent_override_pct\x18\x05 \x01(\x02\x12\x11\n\tlocal_src\x18\x04 \x03(\t\"\x92\x02\n\x0fSampleTextProto\x12\x15\n\rmasthead_full\x18\x01 \x01(\t\x12\x18\n\x10masthead_partial\x18\x02 \x01(\t\x12\x0e\n\x06styles\x18\x03 \x01(\t\x12\x0e\n\x06tester\x18\x04 \x01(\t\x12\x11\n\tposter_sm\x18\x05 \x01(\t\x12\x11\n\tposter_md\x18\x06 \x01(\t\x12\x11\n\tposter_lg\x18\x07 \x01(\t\x12\x13\n\x0bspecimen_48\x18\x08 \x01(\t\x12\x13\n\x0bspecimen_36\x18\t \x01(\t\x12\x13\n\x0bspecimen_32\x18\n \x01(\t\x12\x13\n\x0bspecimen_21\x18\x0b \x01(\t\x12\x13\n\x0bspecimen_16\x18\x0c \x01(\t\x12\x0c\n\x04note\x18\r \x01(\t\"/\n\x0fGlyphGroupProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06glyphs\x18\x02 \x01(\t*\x92\x01\n\x0fTargetTypeProto\x12\x16\n\x12TARGET_UNSPECIFIED\x10\x00\x12\x15\n\x11TARGET_OS_WINDOWS\x10\x01\x12\x11\n\rTARGET_OS_MAC\x10\x02\x12\x13\n\x0fTARGET_OS_LINUX\x10\x03\x12\x15\n\x11TARGET_OS_ANDROID\x10\x04\x12\x11\n\rTARGET_OS_IOS\x10\x05\x42%\n\x16\x63om.google.fonts.protoB\x0b\x46ontsPublic') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x12\x66onts_public.proto\x12\x13google.fonts_public"\xc4\x07\n\x0b\x46\x61milyProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x10\n\x08\x64\x65signer\x18\x02 \x02(\t\x12\x0f\n\x07license\x18\x03 \x02(\t\x12\x10\n\x08\x63\x61tegory\x18\x04 \x03(\t\x12\x12\n\ndate_added\x18\x05 \x02(\t\x12-\n\x05\x66onts\x18\x06 \x03(\x0b\x32\x1e.google.fonts_public.FontProto\x12\x0f\n\x07\x61liases\x18\x07 \x03(\t\x12\x0f\n\x07subsets\x18\x08 \x03(\t\x12\x19\n\x11ttf_autohint_args\x18\t \x01(\t\x12\x33\n\x04\x61xes\x18\n \x03(\x0b\x32%.google.fonts_public.AxisSegmentProto\x12\x62\n\x1aregistry_default_overrides\x18\x0b \x03(\x0b\x32>.google.fonts_public.FamilyProto.RegistryDefaultOverridesEntry\x12\x30\n\x06source\x18\x0c \x01(\x0b\x32 
.google.fonts_public.SourceProto\x12\x0f\n\x07is_noto\x18\r \x01(\x08\x12\x11\n\tlanguages\x18\x0e \x03(\t\x12;\n\tfallbacks\x18\x0f \x03(\x0b\x32(.google.fonts_public.FamilyFallbackProto\x12I\n\rsample_glyphs\x18\x10 \x03(\x0b\x32\x32.google.fonts_public.FamilyProto.SampleGlyphsEntry\x12\x39\n\x0bsample_text\x18\x11 \x01(\x0b\x32$.google.fonts_public.SampleTextProto\x12\x14\n\x0c\x64isplay_name\x18\x12 \x01(\t\x12\x43\n\x15ordered_sample_glyphs\x18\x13 \x03(\x0b\x32$.google.fonts_public.GlyphGroupProto\x12\x14\n\x0cminisite_url\x18\x14 \x01(\t\x12\x16\n\x0eprimary_script\x18\x15 \x01(\t\x12\x18\n\x10primary_language\x18\x16 \x01(\t\x12\x0e\n\x06stroke\x18\x17 \x01(\t\x12\x17\n\x0f\x63lassifications\x18\x19 \x03(\t\x1a?\n\x1dRegistryDefaultOverridesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\x33\n\x11SampleGlyphsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x8a\x01\n\tFontProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05style\x18\x02 \x02(\t\x12\x0e\n\x06weight\x18\x03 \x02(\x05\x12\x10\n\x08\x66ilename\x18\x04 \x02(\t\x12\x18\n\x10post_script_name\x18\x05 \x02(\t\x12\x11\n\tfull_name\x18\x06 \x02(\t\x12\x11\n\tcopyright\x18\x07 \x01(\t"Z\n\x10\x41xisSegmentProto\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x11\n\tmin_value\x18\x02 \x01(\x02\x12\x11\n\tmax_value\x18\x04 \x01(\x02J\x04\x08\x03\x10\x04R\rdefault_value"\x8f\x01\n\x0bSourceProto\x12\x16\n\x0erepository_url\x18\x01 \x01(\t\x12\x0e\n\x06\x62ranch\x18\x05 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\t\x12\x13\n\x0b\x61rchive_url\x18\x03 \x01(\t\x12\x33\n\x05\x66iles\x18\x04 \x03(\x0b\x32$.google.fonts_public.SourceFileProto"9\n\x0fSourceFileProto\x12\x13\n\x0bsource_file\x18\x01 \x01(\t\x12\x11\n\tdest_file\x18\x02 \x01(\t"H\n\x0bTargetProto\x12\x39\n\x0btarget_type\x18\x01 \x01(\x0e\x32$.google.fonts_public.TargetTypeProto"\xcc\x01\n\x13\x46\x61milyFallbackProto\x12:\n\x0b\x61xis_target\x18\x01 \x03(\x0b\x32%.google.fonts_public.AxisSegmentProto\x12\x30\n\x06target\x18\x02 \x03(\x0b\x32 .google.fonts_public.TargetProto\x12\x17\n\x0fsize_adjust_pct\x18\x03 \x01(\x02\x12\x1b\n\x13\x61scent_override_pct\x18\x05 \x01(\x02\x12\x11\n\tlocal_src\x18\x04 \x03(\t"\x92\x02\n\x0fSampleTextProto\x12\x15\n\rmasthead_full\x18\x01 \x01(\t\x12\x18\n\x10masthead_partial\x18\x02 \x01(\t\x12\x0e\n\x06styles\x18\x03 \x01(\t\x12\x0e\n\x06tester\x18\x04 \x01(\t\x12\x11\n\tposter_sm\x18\x05 \x01(\t\x12\x11\n\tposter_md\x18\x06 \x01(\t\x12\x11\n\tposter_lg\x18\x07 \x01(\t\x12\x13\n\x0bspecimen_48\x18\x08 \x01(\t\x12\x13\n\x0bspecimen_36\x18\t \x01(\t\x12\x13\n\x0bspecimen_32\x18\n \x01(\t\x12\x13\n\x0bspecimen_21\x18\x0b \x01(\t\x12\x13\n\x0bspecimen_16\x18\x0c \x01(\t\x12\x0c\n\x04note\x18\r \x01(\t"/\n\x0fGlyphGroupProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06glyphs\x18\x02 \x01(\t*\x92\x01\n\x0fTargetTypeProto\x12\x16\n\x12TARGET_UNSPECIFIED\x10\x00\x12\x15\n\x11TARGET_OS_WINDOWS\x10\x01\x12\x11\n\rTARGET_OS_MAC\x10\x02\x12\x13\n\x0fTARGET_OS_LINUX\x10\x03\x12\x15\n\x11TARGET_OS_ANDROID\x10\x04\x12\x11\n\rTARGET_OS_IOS\x10\x05\x42%\n\x16\x63om.google.fonts.protoB\x0b\x46ontsPublic' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'fonts_public_pb2', globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "fonts_public_pb2", globals()) if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = 
b'\n\026com.google.fonts.protoB\013FontsPublic' - _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._options = None - _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_options = b'8\001' - _FAMILYPROTO_SAMPLEGLYPHSENTRY._options = None - _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_options = b'8\001' - _TARGETTYPEPROTO._serialized_start=2056 - _TARGETTYPEPROTO._serialized_end=2202 - _FAMILYPROTO._serialized_start=44 - _FAMILYPROTO._serialized_end=1008 - _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_start=892 - _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_end=955 - _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_start=957 - _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_end=1008 - _FONTPROTO._serialized_start=1011 - _FONTPROTO._serialized_end=1149 - _AXISSEGMENTPROTO._serialized_start=1151 - _AXISSEGMENTPROTO._serialized_end=1241 - _SOURCEPROTO._serialized_start=1244 - _SOURCEPROTO._serialized_end=1387 - _SOURCEFILEPROTO._serialized_start=1389 - _SOURCEFILEPROTO._serialized_end=1446 - _TARGETPROTO._serialized_start=1448 - _TARGETPROTO._serialized_end=1520 - _FAMILYFALLBACKPROTO._serialized_start=1523 - _FAMILYFALLBACKPROTO._serialized_end=1727 - _SAMPLETEXTPROTO._serialized_start=1730 - _SAMPLETEXTPROTO._serialized_end=2004 - _GLYPHGROUPPROTO._serialized_start=2006 - _GLYPHGROUPPROTO._serialized_end=2053 + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b"\n\026com.google.fonts.protoB\013FontsPublic" + _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._options = None + _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_options = b"8\001" + _FAMILYPROTO_SAMPLEGLYPHSENTRY._options = None + _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_options = b"8\001" + _TARGETTYPEPROTO._serialized_start = 2056 + _TARGETTYPEPROTO._serialized_end = 2202 + _FAMILYPROTO._serialized_start = 44 + _FAMILYPROTO._serialized_end = 1008 + _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_start = 892 + _FAMILYPROTO_REGISTRYDEFAULTOVERRIDESENTRY._serialized_end = 955 + _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_start = 957 + _FAMILYPROTO_SAMPLEGLYPHSENTRY._serialized_end = 1008 + _FONTPROTO._serialized_start = 1011 + _FONTPROTO._serialized_end = 1149 + _AXISSEGMENTPROTO._serialized_start = 1151 + _AXISSEGMENTPROTO._serialized_end = 1241 + _SOURCEPROTO._serialized_start = 1244 + _SOURCEPROTO._serialized_end = 1387 + _SOURCEFILEPROTO._serialized_start = 1389 + _SOURCEFILEPROTO._serialized_end = 1446 + _TARGETPROTO._serialized_start = 1448 + _TARGETPROTO._serialized_end = 1520 + _FAMILYFALLBACKPROTO._serialized_start = 1523 + _FAMILYFALLBACKPROTO._serialized_end = 1727 + _SAMPLETEXTPROTO._serialized_start = 1730 + _SAMPLETEXTPROTO._serialized_end = 2004 + _GLYPHGROUPPROTO._serialized_start = 2006 + _GLYPHGROUPPROTO._serialized_end = 2053 # @@protoc_insertion_point(module_scope) diff --git a/Lib/gftools/knowledge_pb2.py b/Lib/gftools/knowledge_pb2.py index 53e3ac4c..746a0a46 100644 --- a/Lib/gftools/knowledge_pb2.py +++ b/Lib/gftools/knowledge_pb2.py @@ -6,430 +6,736 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='knowledge.proto', - package='fonts', - syntax='proto2', - serialized_options=b'\n\026com.google.fonts.protoB\tKnowledge', - create_key=_descriptor._internal_create_key, - 
serialized_pb=b'\n\x0fknowledge.proto\x12\x05\x66onts\"!\n\x0eKnowledgeProto\x12\x0f\n\x07modules\x18\x01 \x03(\t\"N\n\x0bModuleProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x65xcerpt\x18\x03 \x01(\t\x12\x0f\n\x07lessons\x18\x04 \x03(\t\"+\n\nTopicProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\xd2\x01\n\x0bLessonProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61uthors\x18\x03 \x03(\t\x12\x11\n\treviewers\x18\x04 \x03(\t\x12\x0e\n\x06topics\x18\x05 \x03(\t\x12\x14\n\x0cprev_lessons\x18\x06 \x03(\t\x12\x14\n\x0cnext_lessons\x18\x07 \x03(\t\x12\x15\n\rrelated_terms\x18\x08 \x03(\t\x12\x0f\n\x07\x65xcerpt\x18\t \x01(\t\x12\x1c\n\x14related_content_urls\x18\n \x03(\t\"r\n\tTermProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x65xcerpt\x18\x03 \x01(\t\x12\x17\n\x0frelated_lessons\x18\x04 \x03(\t\x12\x1c\n\x14related_content_urls\x18\x05 \x03(\t\"\x8d\x01\n\x11\x43ontributorsProto\x12?\n\x0c\x63ontributors\x18\x01 \x03(\x0b\x32).fonts.ContributorsProto.ContributorProto\x1a\x37\n\x10\x43ontributorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rpersonal_site\x18\x02 \x01(\tB#\n\x16\x63om.google.fonts.protoB\tKnowledge' + name="knowledge.proto", + package="fonts", + syntax="proto2", + serialized_options=b"\n\026com.google.fonts.protoB\tKnowledge", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x0fknowledge.proto\x12\x05\x66onts"!\n\x0eKnowledgeProto\x12\x0f\n\x07modules\x18\x01 \x03(\t"N\n\x0bModuleProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x65xcerpt\x18\x03 \x01(\t\x12\x0f\n\x07lessons\x18\x04 \x03(\t"+\n\nTopicProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t"\xd2\x01\n\x0bLessonProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61uthors\x18\x03 \x03(\t\x12\x11\n\treviewers\x18\x04 \x03(\t\x12\x0e\n\x06topics\x18\x05 \x03(\t\x12\x14\n\x0cprev_lessons\x18\x06 \x03(\t\x12\x14\n\x0cnext_lessons\x18\x07 \x03(\t\x12\x15\n\rrelated_terms\x18\x08 \x03(\t\x12\x0f\n\x07\x65xcerpt\x18\t \x01(\t\x12\x1c\n\x14related_content_urls\x18\n \x03(\t"r\n\tTermProto\x12\x0f\n\x07\x61lt_ids\x18\x01 \x03(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x65xcerpt\x18\x03 \x01(\t\x12\x17\n\x0frelated_lessons\x18\x04 \x03(\t\x12\x1c\n\x14related_content_urls\x18\x05 \x03(\t"\x8d\x01\n\x11\x43ontributorsProto\x12?\n\x0c\x63ontributors\x18\x01 \x03(\x0b\x32).fonts.ContributorsProto.ContributorProto\x1a\x37\n\x10\x43ontributorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rpersonal_site\x18\x02 \x01(\tB#\n\x16\x63om.google.fonts.protoB\tKnowledge', ) - - _KNOWLEDGEPROTO = _descriptor.Descriptor( - name='KnowledgeProto', - full_name='fonts.KnowledgeProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='modules', full_name='fonts.KnowledgeProto.modules', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - 
oneofs=[ - ], - serialized_start=26, - serialized_end=59, + name="KnowledgeProto", + full_name="fonts.KnowledgeProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="modules", + full_name="fonts.KnowledgeProto.modules", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=26, + serialized_end=59, ) _MODULEPROTO = _descriptor.Descriptor( - name='ModuleProto', - full_name='fonts.ModuleProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='alt_ids', full_name='fonts.ModuleProto.alt_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='name', full_name='fonts.ModuleProto.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='excerpt', full_name='fonts.ModuleProto.excerpt', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='lessons', full_name='fonts.ModuleProto.lessons', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=61, - serialized_end=139, + name="ModuleProto", + full_name="fonts.ModuleProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="alt_ids", + full_name="fonts.ModuleProto.alt_ids", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="name", + full_name="fonts.ModuleProto.name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + 
has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="excerpt", + full_name="fonts.ModuleProto.excerpt", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="lessons", + full_name="fonts.ModuleProto.lessons", + index=3, + number=4, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=61, + serialized_end=139, ) _TOPICPROTO = _descriptor.Descriptor( - name='TopicProto', - full_name='fonts.TopicProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='alt_ids', full_name='fonts.TopicProto.alt_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='name', full_name='fonts.TopicProto.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=141, - serialized_end=184, + name="TopicProto", + full_name="fonts.TopicProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="alt_ids", + full_name="fonts.TopicProto.alt_ids", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="name", + full_name="fonts.TopicProto.name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + 
serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=141, + serialized_end=184, ) _LESSONPROTO = _descriptor.Descriptor( - name='LessonProto', - full_name='fonts.LessonProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='alt_ids', full_name='fonts.LessonProto.alt_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='name', full_name='fonts.LessonProto.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='authors', full_name='fonts.LessonProto.authors', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reviewers', full_name='fonts.LessonProto.reviewers', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='topics', full_name='fonts.LessonProto.topics', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='prev_lessons', full_name='fonts.LessonProto.prev_lessons', index=5, - number=6, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='next_lessons', full_name='fonts.LessonProto.next_lessons', index=6, - number=7, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='related_terms', full_name='fonts.LessonProto.related_terms', index=7, - number=8, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='excerpt', full_name='fonts.LessonProto.excerpt', index=8, - number=9, type=9, cpp_type=9, label=1, 
- has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='related_content_urls', full_name='fonts.LessonProto.related_content_urls', index=9, - number=10, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=187, - serialized_end=397, + name="LessonProto", + full_name="fonts.LessonProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="alt_ids", + full_name="fonts.LessonProto.alt_ids", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="name", + full_name="fonts.LessonProto.name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="authors", + full_name="fonts.LessonProto.authors", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="reviewers", + full_name="fonts.LessonProto.reviewers", + index=3, + number=4, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="topics", + full_name="fonts.LessonProto.topics", + index=4, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="prev_lessons", + full_name="fonts.LessonProto.prev_lessons", + index=5, + number=6, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_lessons", + 
full_name="fonts.LessonProto.next_lessons", + index=6, + number=7, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="related_terms", + full_name="fonts.LessonProto.related_terms", + index=7, + number=8, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="excerpt", + full_name="fonts.LessonProto.excerpt", + index=8, + number=9, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="related_content_urls", + full_name="fonts.LessonProto.related_content_urls", + index=9, + number=10, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=187, + serialized_end=397, ) _TERMPROTO = _descriptor.Descriptor( - name='TermProto', - full_name='fonts.TermProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='alt_ids', full_name='fonts.TermProto.alt_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='name', full_name='fonts.TermProto.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='excerpt', full_name='fonts.TermProto.excerpt', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='related_lessons', full_name='fonts.TermProto.related_lessons', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='related_content_urls', full_name='fonts.TermProto.related_content_urls', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=399, - serialized_end=513, + name="TermProto", + full_name="fonts.TermProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="alt_ids", + full_name="fonts.TermProto.alt_ids", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="name", + full_name="fonts.TermProto.name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="excerpt", + full_name="fonts.TermProto.excerpt", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="related_lessons", + full_name="fonts.TermProto.related_lessons", + index=3, + number=4, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="related_content_urls", + full_name="fonts.TermProto.related_content_urls", + index=4, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=399, + serialized_end=513, ) _CONTRIBUTORSPROTO_CONTRIBUTORPROTO = _descriptor.Descriptor( - name='ContributorProto', - full_name='fonts.ContributorsProto.ContributorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='fonts.ContributorsProto.ContributorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='personal_site', full_name='fonts.ContributorsProto.ContributorProto.personal_site', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=602, - serialized_end=657, + name="ContributorProto", + full_name="fonts.ContributorsProto.ContributorProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="fonts.ContributorsProto.ContributorProto.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="personal_site", + full_name="fonts.ContributorsProto.ContributorProto.personal_site", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=602, + serialized_end=657, ) _CONTRIBUTORSPROTO = _descriptor.Descriptor( - name='ContributorsProto', - full_name='fonts.ContributorsProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='contributors', full_name='fonts.ContributorsProto.contributors', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_CONTRIBUTORSPROTO_CONTRIBUTORPROTO, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=516, - serialized_end=657, + name="ContributorsProto", + full_name="fonts.ContributorsProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="contributors", + full_name="fonts.ContributorsProto.contributors", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _CONTRIBUTORSPROTO_CONTRIBUTORPROTO, + ], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=516, + serialized_end=657, ) _CONTRIBUTORSPROTO_CONTRIBUTORPROTO.containing_type = _CONTRIBUTORSPROTO -_CONTRIBUTORSPROTO.fields_by_name['contributors'].message_type = _CONTRIBUTORSPROTO_CONTRIBUTORPROTO -DESCRIPTOR.message_types_by_name['KnowledgeProto'] = _KNOWLEDGEPROTO -DESCRIPTOR.message_types_by_name['ModuleProto'] = _MODULEPROTO -DESCRIPTOR.message_types_by_name['TopicProto'] = _TOPICPROTO -DESCRIPTOR.message_types_by_name['LessonProto'] = _LESSONPROTO -DESCRIPTOR.message_types_by_name['TermProto'] = _TERMPROTO -DESCRIPTOR.message_types_by_name['ContributorsProto'] = _CONTRIBUTORSPROTO +_CONTRIBUTORSPROTO.fields_by_name[ + "contributors" +].message_type = _CONTRIBUTORSPROTO_CONTRIBUTORPROTO +DESCRIPTOR.message_types_by_name["KnowledgeProto"] = _KNOWLEDGEPROTO +DESCRIPTOR.message_types_by_name["ModuleProto"] = _MODULEPROTO +DESCRIPTOR.message_types_by_name["TopicProto"] = _TOPICPROTO +DESCRIPTOR.message_types_by_name["LessonProto"] = _LESSONPROTO +DESCRIPTOR.message_types_by_name["TermProto"] = _TERMPROTO +DESCRIPTOR.message_types_by_name["ContributorsProto"] = _CONTRIBUTORSPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) -KnowledgeProto = _reflection.GeneratedProtocolMessageType('KnowledgeProto', (_message.Message,), { - 'DESCRIPTOR' : _KNOWLEDGEPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.KnowledgeProto) - }) +KnowledgeProto = _reflection.GeneratedProtocolMessageType( + "KnowledgeProto", + (_message.Message,), + { + "DESCRIPTOR": _KNOWLEDGEPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.KnowledgeProto) + }, +) _sym_db.RegisterMessage(KnowledgeProto) -ModuleProto = _reflection.GeneratedProtocolMessageType('ModuleProto', (_message.Message,), { - 'DESCRIPTOR' : _MODULEPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.ModuleProto) - }) +ModuleProto = _reflection.GeneratedProtocolMessageType( + "ModuleProto", + (_message.Message,), + { + "DESCRIPTOR": _MODULEPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.ModuleProto) + }, +) _sym_db.RegisterMessage(ModuleProto) -TopicProto = _reflection.GeneratedProtocolMessageType('TopicProto', (_message.Message,), { - 'DESCRIPTOR' : _TOPICPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.TopicProto) - }) +TopicProto = _reflection.GeneratedProtocolMessageType( + "TopicProto", + (_message.Message,), + { + "DESCRIPTOR": _TOPICPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.TopicProto) + }, +) _sym_db.RegisterMessage(TopicProto) -LessonProto = _reflection.GeneratedProtocolMessageType('LessonProto', (_message.Message,), { - 'DESCRIPTOR' : _LESSONPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.LessonProto) - }) +LessonProto = _reflection.GeneratedProtocolMessageType( + "LessonProto", + (_message.Message,), + { + "DESCRIPTOR": _LESSONPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.LessonProto) + }, +) _sym_db.RegisterMessage(LessonProto) -TermProto = 
_reflection.GeneratedProtocolMessageType('TermProto', (_message.Message,), { - 'DESCRIPTOR' : _TERMPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.TermProto) - }) +TermProto = _reflection.GeneratedProtocolMessageType( + "TermProto", + (_message.Message,), + { + "DESCRIPTOR": _TERMPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.TermProto) + }, +) _sym_db.RegisterMessage(TermProto) -ContributorsProto = _reflection.GeneratedProtocolMessageType('ContributorsProto', (_message.Message,), { - - 'ContributorProto' : _reflection.GeneratedProtocolMessageType('ContributorProto', (_message.Message,), { - 'DESCRIPTOR' : _CONTRIBUTORSPROTO_CONTRIBUTORPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.ContributorsProto.ContributorProto) - }) - , - 'DESCRIPTOR' : _CONTRIBUTORSPROTO, - '__module__' : 'knowledge_pb2' - # @@protoc_insertion_point(class_scope:fonts.ContributorsProto) - }) +ContributorsProto = _reflection.GeneratedProtocolMessageType( + "ContributorsProto", + (_message.Message,), + { + "ContributorProto": _reflection.GeneratedProtocolMessageType( + "ContributorProto", + (_message.Message,), + { + "DESCRIPTOR": _CONTRIBUTORSPROTO_CONTRIBUTORPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.ContributorsProto.ContributorProto) + }, + ), + "DESCRIPTOR": _CONTRIBUTORSPROTO, + "__module__": "knowledge_pb2" + # @@protoc_insertion_point(class_scope:fonts.ContributorsProto) + }, +) _sym_db.RegisterMessage(ContributorsProto) _sym_db.RegisterMessage(ContributorsProto.ContributorProto) diff --git a/Lib/gftools/packager.py b/Lib/gftools/packager.py index 7a64707d..644b2feb 100644 --- a/Lib/gftools/packager.py +++ b/Lib/gftools/packager.py @@ -95,7 +95,7 @@ """ -ADD_TO_TRAFFIC_JAM=""" +ADD_TO_TRAFFIC_JAM = """ mutation {{ addProjectV2ItemById( input: {{ @@ -564,7 +564,7 @@ def pr_family( project_id=TRAFFIC_JAM_ID, content_id=resp["node_id"], ), - {} + {}, ) return True diff --git a/Lib/gftools/qa.py b/Lib/gftools/qa.py index 007cac45..4a406199 100644 --- a/Lib/gftools/qa.py +++ b/Lib/gftools/qa.py @@ -6,6 +6,7 @@ from gftools.gfgithub import GitHubClient from gftools.utils import mkdir import sys + try: from diffenator2 import ninja_diff, ninja_proof except ModuleNotFoundError: @@ -30,7 +31,7 @@ def safe_call(self, *args, **kwargs): print(msg) print() print(traceback.format_exc()) - self.post_to_github(msg+"\n\n"+"See CI logs for more details") + self.post_to_github(msg + "\n\n" + "See CI logs for more details") return safe_call @@ -179,4 +180,3 @@ def post_to_github(self, text): "is not allowed to access the repo's secrets for " f"security reasons. 
Full traceback:\n{e}" ) - diff --git a/Lib/gftools/scripts/__init__.py b/Lib/gftools/scripts/__init__.py index f4bc7eaf..671f3914 100644 --- a/Lib/gftools/scripts/__init__.py +++ b/Lib/gftools/scripts/__init__.py @@ -26,6 +26,7 @@ from gftools._version import version as __version__ + def _get_subcommands(): subcommands = {} for module in Path(__file__).parent.glob("*.py"): @@ -58,27 +59,33 @@ def print_menu(): subcommands = _get_subcommands() -description = "Run gftools subcommands:{0}".format(''.join( - ['\n {0}'.format(sc) for sc in sorted(subcommands.keys())])) +description = "Run gftools subcommands:{0}".format( + "".join(["\n {0}".format(sc) for sc in sorted(subcommands.keys())]) +) -description += ("\n\nSubcommands have their own help messages.\n" - "These are usually accessible with the -h/--help\n" - "flag positioned after the subcommand.\n" - "I.e.: gftools subcommand -h") +description += ( + "\n\nSubcommands have their own help messages.\n" + "These are usually accessible with the -h/--help\n" + "flag positioned after the subcommand.\n" + "I.e.: gftools subcommand -h" +) -parser = argparse.ArgumentParser(description=description, - formatter_class=RawTextHelpFormatter) -parser.add_argument('subcommand', - nargs=1, - help="the subcommand to execute") +parser = argparse.ArgumentParser( + description=description, formatter_class=RawTextHelpFormatter +) +parser.add_argument("subcommand", nargs=1, help="the subcommand to execute") -parser.add_argument('--list-subcommands', action='store_true', - help='print the list of subcommnds ' - 'to stdout, separated by a space character. This is ' - 'usually only used to generate the shell completion code.') +parser.add_argument( + "--list-subcommands", + action="store_true", + help="print the list of subcommnds " + "to stdout, separated by a space character. This is " + "usually only used to generate the shell completion code.", +) -parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + __version__) +parser.add_argument( + "--version", "-v", action="version", version="%(prog)s " + __version__ +) def main(args=None): @@ -90,12 +97,13 @@ def main(args=None): mod = import_module(f".{module}", package) mod.main(args[2:]) elif "--list-subcommands" in sys.argv: - print(' '.join(list(sorted(subcommands.keys())))) + print(" ".join(list(sorted(subcommands.keys())))) else: # shows menu and help if no args print_menu() args = parser.parse_args() parser.print_help() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/add_axis.py b/Lib/gftools/scripts/add_axis.py index 23e43ce3..b9f56e6c 100755 --- a/Lib/gftools/scripts/add_axis.py +++ b/Lib/gftools/scripts/add_axis.py @@ -8,14 +8,11 @@ parser = argparse.ArgumentParser( - prog='gftools add-axis', + prog="gftools add-axis", description=__doc__, - ) +) -parser.add_argument( - 'font', - type=str, - help='The font file to the axis values from.') +parser.add_argument("font", type=str, help="The font file to the axis values from.") class ProgramAbortError(Exception): @@ -29,56 +26,61 @@ class UserAbortError(Exception): def _get_fvar_axis(name_table, fvar_table): axes = [] for axis in fvar_table.axes: - axes.append((axis, f'{name_table.getName(axis.axisNameID, 3, 1, 0x0409)} {axis.axisTag}')) - axes.sort(key=lambda a:a[0].axisTag) - choices = '\n'.join([f' {index}: {label}' for index, (_, label) in enumerate(axes)]) - question = ('Found axes:\n' - f'{choices}' - '\n' - 'pick one by number (e.g. 
0), q=quit:') + axes.append( + ( + axis, + f"{name_table.getName(axis.axisNameID, 3, 1, 0x0409)} {axis.axisTag}", + ) + ) + axes.sort(key=lambda a: a[0].axisTag) + choices = "\n".join( + [f" {index}: {label}" for index, (_, label) in enumerate(axes)] + ) + question = "Found axes:\n" f"{choices}" "\n" "pick one by number (e.g. 0), q=quit:" while True: try: answer = input(question).strip() - if answer == 'q': + if answer == "q": raise UserAbortError() index = int(answer) # raises ValueError fvar_axis, _ = axes[index] # raises IndexError except (ValueError, IndexError): # must try again continue - print(f'You picked: {fvar_axis.axisTag}.') + print(f"You picked: {fvar_axis.axisTag}.") return fvar_axis def _get_fallbacks_gen(name_table, stat_axis_index, AxisValue): for stat_axis_value in AxisValue: - if stat_axis_value.Format in (1, 3): if stat_axis_value.AxisIndex == stat_axis_index: yield ( name_table.getName(stat_axis_value.ValueNameID, 3, 1, 0x0409), - stat_axis_value.Value + stat_axis_value.Value, ) elif stat_axis_value.Format == 4: for avr in stat_axis_value.AxisValueRecord: if avr.AxisIndex == stat_axis_index: yield ( name_table.getName(stat_axis_value.ValueNameID, 3, 1, 0x0409), - avr.Value + avr.Value, ) else: - print(f'SKIP STAT AxisValue can\'t handel Format {stat_axis_value.Format} ' - f'({name_table.getName(stat_axis_value.ValueNameID, 3, 1, 0x0409)})') + print( + f"SKIP STAT AxisValue can't handel Format {stat_axis_value.Format} " + f"({name_table.getName(stat_axis_value.ValueNameID, 3, 1, 0x0409)})" + ) def add_axis(font: str): axis_proto = AxisProto() ttFont = TTFont(font) - name_table = ttFont['name'] + name_table = ttFont["name"] try: - fvar_table = ttFont['fvar'] + fvar_table = ttFont["fvar"] except KeyError: - raise ProgramAbortError('No fvar present') + raise ProgramAbortError("No fvar present") fvar_axis = _get_fvar_axis(name_table, fvar_table) # Axis tag @@ -91,8 +93,9 @@ def add_axis(font: str): # , # , # ) - axis_proto.display_name = \ - f'{name_table.getName(fvar_axis.axisNameID, 3, 1, 0x0409)}' + axis_proto.display_name = ( + f"{name_table.getName(fvar_axis.axisNameID, 3, 1, 0x0409)}" + ) # Lower bound for the axis axis_proto.min_value = fvar_axis.minValue # The default position to use and to prefer for exemplars @@ -103,28 +106,29 @@ def add_axis(font: str): axis_proto.precision = 1 # ask user? # Short descriptive paragraph axis_proto.description = ( # ask user? - 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod' - ' tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim' - ' veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea' - ' commodo consequat. Duis aute irure dolor in reprehenderit in voluptate' - ' velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint' - ' occaecat cupidatat non proident, sunt in culpa qui officia deserunt' - ' mollit anim id est laborum.' - ) + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod" + " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" + " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" + " commodo consequat. Duis aute irure dolor in reprehenderit in voluptate" + " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" + " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" + " mollit anim id est laborum." 
+ ) fallback_proto = FallbackProto() - fallback_proto.name = 'Default' + fallback_proto.name = "Default" fallback_proto.value = fvar_axis.defaultValue axis_proto.fallback.append(fallback_proto) # Is the axis fallback only? axis_proto.fallback_only = False - text_proto = text_format.MessageToString(axis_proto, as_utf8=True, - use_index_order=True) - filename = f'{axis_proto.display_name.lower()}.textproto' - with open(filename, 'x') as f: + text_proto = text_format.MessageToString( + axis_proto, as_utf8=True, use_index_order=True + ) + filename = f"{axis_proto.display_name.lower()}.textproto" + with open(filename, "x") as f: f.write(text_proto) - print(f'DONE create {filename}!') + print(f"DONE create {filename}!") def main(args=None): @@ -132,12 +136,12 @@ def main(args=None): args = parser.parse_args(args) add_axis(args.font) except UserAbortError: - print('Aborted by user!') + print("Aborted by user!") sys.exit(1) except ProgramAbortError as e: - print(f'Aborted by program: {e}') + print(f"Aborted by program: {e}") sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/add_designer.py b/Lib/gftools/scripts/add_designer.py index a8ca1e4f..6e3300ee 100755 --- a/Lib/gftools/scripts/add_designer.py +++ b/Lib/gftools/scripts/add_designer.py @@ -89,7 +89,7 @@ def gen_hrefs(urls): else: # https://www.mysite.com --> mysite.com res[url] = remove_url_prefix(url) - return " | ".join(f'{v}' for k,v in res.items()) + return " | ".join(f'{v}' for k, v in res.items()) def make_designer( @@ -121,7 +121,9 @@ def make_designer( image.save(img_dst) print(f"Generating info.pb file") - info_pb = gen_info(name, os.path.basename(img_dst) if os.path.isfile(img_dst) else None) + info_pb = gen_info( + name, os.path.basename(img_dst) if os.path.isfile(img_dst) else None + ) filename = os.path.join(designer_dir, "info.pb") with open(filename, "w") as f: f.write(info_pb) @@ -150,7 +152,9 @@ def main(args=None): parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("designers_directory", help="path to google/fonts designer dir") parser.add_argument("name", help="Designer name e.g 'Steve Matteson'") - parser.add_argument("--img_path", help="Optional path to profile image", default=None) + parser.add_argument( + "--img_path", help="Optional path to profile image", default=None + ) parser.add_argument( "--spreadsheet", help="Optional path to the Google Drive spreadsheet" ) @@ -160,7 +164,9 @@ def main(args=None): try: import pandas as pd except ImportError as e: - raise ValueError("The pandas library is required to read Excel spreadsheets") + raise ValueError( + "The pandas library is required to read Excel spreadsheets" + ) df = pd.read_excel(args.spreadsheet) entry = df.loc[df["Designer Name"] == args.name] diff --git a/Lib/gftools/scripts/add_font.py b/Lib/gftools/scripts/add_font.py index 23fb90c6..2fa45c9a 100755 --- a/Lib/gftools/scripts/add_font.py +++ b/Lib/gftools/scripts/add_font.py @@ -68,281 +68,314 @@ parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument("--min_pct", type=int, default=50, help='What percentage of subset codepoints have to be supported' - ' for a non-ext subset.') -parser.add_argument("--min_pct_ext", type=float, default=2, help='What percentage of subset codepoints have to be supported' - ' for a -ext subset.') -parser.add_argument("--min_relaxed_pct", type=int, default=50, help='What percentage of subset codepoints have to be supported' - f' for a relaxed subset ({", ".join(RELAXED_SUBSETS)}).') 
-parser.add_argument("--lang", type=str, help='Path to lang metadata package', default=None) -parser.add_argument("directory", type=str, help='A directory containing a font family') -parser.add_argument("--github_url", type=str, default=None, - help="The font family's github url which gets written to new description files" +parser.add_argument( + "--min_pct", + type=int, + default=50, + help="What percentage of subset codepoints have to be supported" + " for a non-ext subset.", +) +parser.add_argument( + "--min_pct_ext", + type=float, + default=2, + help="What percentage of subset codepoints have to be supported" + " for a -ext subset.", +) +parser.add_argument( + "--min_relaxed_pct", + type=int, + default=50, + help="What percentage of subset codepoints have to be supported" + f' for a relaxed subset ({", ".join(RELAXED_SUBSETS)}).', +) +parser.add_argument( + "--lang", type=str, help="Path to lang metadata package", default=None +) +parser.add_argument("directory", type=str, help="A directory containing a font family") +parser.add_argument( + "--github_url", + type=str, + default=None, + help="The font family's github url which gets written to new description files", ) def _FileFamilyStyleWeights(fontdir): - """Extracts file, family, style, weight 4-tuples for each font in dir. - - Args: - fontdir: Directory that supposedly contains font files for a family. - Returns: - List of fonts.FileFamilyStyleWeightTuple ordered by weight, style - (normal first). - Raises: - OSError: If the font directory doesn't exist (errno.ENOTDIR) or has no font - files (errno.ENOENT) in it. - RuntimeError: If the font directory appears to contain files from multiple - families. - """ - if not os.path.isdir(fontdir): - raise OSError(errno.ENOTDIR, 'No such directory', fontdir) - - files = glob.glob(os.path.join(fontdir, '*.[ot]tf')) - if not files: - raise OSError(errno.ENOENT, 'no font files found') - - result = [fonts.FamilyStyleWeight(f) for f in files] - def _Cmp(r1, r2): - return cmp(r1.weight, r2.weight) or -cmp(r1.style, r2.style) - result = sorted(result, key=cmp_to_key(_Cmp)) - - family_names = {i.family for i in result} - if len(family_names) > 1: - raise RuntimeError('Ambiguous family name; possibilities: %s' - % family_names) - - return result + """Extracts file, family, style, weight 4-tuples for each font in dir. + + Args: + fontdir: Directory that supposedly contains font files for a family. + Returns: + List of fonts.FileFamilyStyleWeightTuple ordered by weight, style + (normal first). + Raises: + OSError: If the font directory doesn't exist (errno.ENOTDIR) or has no font + files (errno.ENOENT) in it. + RuntimeError: If the font directory appears to contain files from multiple + families. + """ + if not os.path.isdir(fontdir): + raise OSError(errno.ENOTDIR, "No such directory", fontdir) + + files = glob.glob(os.path.join(fontdir, "*.[ot]tf")) + if not files: + raise OSError(errno.ENOENT, "no font files found") + + result = [fonts.FamilyStyleWeight(f) for f in files] + + def _Cmp(r1, r2): + return cmp(r1.weight, r2.weight) or -cmp(r1.style, r2.style) + + result = sorted(result, key=cmp_to_key(_Cmp)) + + family_names = {i.family for i in result} + if len(family_names) > 1: + raise RuntimeError("Ambiguous family name; possibilities: %s" % family_names) + + return result def _MakeMetadata(args, is_new): - """Builds a dictionary matching a METADATA.pb file. - - Args: - fontdir: Directory containing font files for which we want metadata. - is_new: Whether this is an existing or new family. 
- Returns: - A fonts_pb2.FamilyProto message, the METADATA.pb structure. - Raises: - RuntimeError: If the variable font axes info differs between font files of - same family. - """ - file_family_style_weights = _FileFamilyStyleWeights(args.directory) - - first_file = file_family_style_weights[0].file - old_metadata_file = os.path.join(args.directory, 'METADATA.pb') - font_license = fonts.LicenseFromPath(args.directory) - - metadata = fonts_pb2.FamilyProto() - metadata.name = file_family_style_weights[0].family - - subsets_in_font = [s[0] for s in SubsetsInFont( - first_file, args.min_pct, args.min_pct_ext - )] - - relaxed_subsets = set(RELAXED_SUBSETS) & set([s[0] for s in SubsetsInFont( - first_file, args.min_relaxed_pct, args.min_relaxed_pct - )]) - - subsets_in_font = list(set(subsets_in_font) | relaxed_subsets) - - if not is_new: - old_metadata = fonts.ReadProto(fonts_pb2.FamilyProto(), old_metadata_file) - metadata.designer = old_metadata.designer - metadata.category[:] = old_metadata.category - metadata.date_added = old_metadata.date_added - subsets = set(old_metadata.subsets) | set(subsets_in_font) - metadata.languages[:] = old_metadata.languages - metadata.fallbacks.extend(old_metadata.fallbacks) - if old_metadata.classifications: - metadata.classifications[:] = old_metadata.classifications - if old_metadata.stroke: - metadata.stroke = old_metadata.stroke - if old_metadata.is_noto: - metadata.is_noto = True - if old_metadata.display_name: - metadata.display_name = old_metadata.display_name - if old_metadata.primary_script: - metadata.primary_script = old_metadata.primary_script - if old_metadata.sample_text and old_metadata.sample_text.ByteSize(): - metadata.sample_text.CopyFrom(old_metadata.sample_text) - if old_metadata.minisite_url: - metadata.minisite_url = old_metadata.minisite_url - if old_metadata.registry_default_overrides: - metadata.registry_default_overrides.update(old_metadata.registry_default_overrides) - if old_metadata.source: - metadata.source.CopyFrom(old_metadata.source) - else: - metadata.designer = 'UNKNOWN' - metadata.category.append('SANS_SERIF') - metadata.date_added = time.strftime('%Y-%m-%d') - subsets = ['menu'] + subsets_in_font - with ttLib.TTFont(file_family_style_weights[0][0]) as ttfont: - script = primary_script(ttfont) - if script is not None and script not in ("Latn", "Cyrl", "Grek",): - metadata.primary_script = script - - metadata.license = font_license - subsets = sorted(subsets) - for subset in subsets: - metadata.subsets.append(subset) - - for (fontfile, family, style, weight) in file_family_style_weights: - filename = os.path.basename(fontfile) - font_psname = fonts.ExtractName(fontfile, fonts.NAME_PSNAME, - os.path.splitext(filename)[0]) - font_copyright = fonts.ExtractName(fontfile, fonts.NAME_COPYRIGHT, - '???.').strip() - - font_metadata = metadata.fonts.add() - font_metadata.name = family - font_metadata.style = style - font_metadata.weight = weight - font_metadata.filename = filename - font_metadata.post_script_name = font_psname - default_fullname = os.path.splitext(filename)[0].replace('-', ' ') - font_metadata.full_name = fonts.ExtractName(fontfile, fonts.NAME_FULLNAME, - default_fullname) - font_metadata.copyright = font_copyright - - axes_info_from_font_files \ - = {_AxisInfo(f.file) for f in file_family_style_weights} - if len(axes_info_from_font_files) != 1: - raise RuntimeError('Variable axes info not matching between font files') - - for axes_info in axes_info_from_font_files: - if axes_info: - for axes in axes_info: - var_axes 
= metadata.axes.add() - var_axes.tag = axes[0] - var_axes.min_value = axes[1] - var_axes.max_value = axes[2] - - registry_overrides = _RegistryOverrides(axes_info_from_font_files) - if registry_overrides: - for k, v in registry_overrides.items(): - metadata.registry_default_overrides[k] = v - return metadata + """Builds a dictionary matching a METADATA.pb file. + + Args: + fontdir: Directory containing font files for which we want metadata. + is_new: Whether this is an existing or new family. + Returns: + A fonts_pb2.FamilyProto message, the METADATA.pb structure. + Raises: + RuntimeError: If the variable font axes info differs between font files of + same family. + """ + file_family_style_weights = _FileFamilyStyleWeights(args.directory) + + first_file = file_family_style_weights[0].file + old_metadata_file = os.path.join(args.directory, "METADATA.pb") + font_license = fonts.LicenseFromPath(args.directory) + + metadata = fonts_pb2.FamilyProto() + metadata.name = file_family_style_weights[0].family + + subsets_in_font = [ + s[0] for s in SubsetsInFont(first_file, args.min_pct, args.min_pct_ext) + ] + + relaxed_subsets = set(RELAXED_SUBSETS) & set( + [ + s[0] + for s in SubsetsInFont( + first_file, args.min_relaxed_pct, args.min_relaxed_pct + ) + ] + ) + + subsets_in_font = list(set(subsets_in_font) | relaxed_subsets) + + if not is_new: + old_metadata = fonts.ReadProto(fonts_pb2.FamilyProto(), old_metadata_file) + metadata.designer = old_metadata.designer + metadata.category[:] = old_metadata.category + metadata.date_added = old_metadata.date_added + subsets = set(old_metadata.subsets) | set(subsets_in_font) + metadata.languages[:] = old_metadata.languages + metadata.fallbacks.extend(old_metadata.fallbacks) + if old_metadata.classifications: + metadata.classifications[:] = old_metadata.classifications + if old_metadata.stroke: + metadata.stroke = old_metadata.stroke + if old_metadata.is_noto: + metadata.is_noto = True + if old_metadata.display_name: + metadata.display_name = old_metadata.display_name + if old_metadata.primary_script: + metadata.primary_script = old_metadata.primary_script + if old_metadata.sample_text and old_metadata.sample_text.ByteSize(): + metadata.sample_text.CopyFrom(old_metadata.sample_text) + if old_metadata.minisite_url: + metadata.minisite_url = old_metadata.minisite_url + if old_metadata.registry_default_overrides: + metadata.registry_default_overrides.update( + old_metadata.registry_default_overrides + ) + if old_metadata.source: + metadata.source.CopyFrom(old_metadata.source) + else: + metadata.designer = "UNKNOWN" + metadata.category.append("SANS_SERIF") + metadata.date_added = time.strftime("%Y-%m-%d") + subsets = ["menu"] + subsets_in_font + with ttLib.TTFont(file_family_style_weights[0][0]) as ttfont: + script = primary_script(ttfont) + if script is not None and script not in ( + "Latn", + "Cyrl", + "Grek", + ): + metadata.primary_script = script + + metadata.license = font_license + subsets = sorted(subsets) + for subset in subsets: + metadata.subsets.append(subset) + + for fontfile, family, style, weight in file_family_style_weights: + filename = os.path.basename(fontfile) + font_psname = fonts.ExtractName( + fontfile, fonts.NAME_PSNAME, os.path.splitext(filename)[0] + ) + font_copyright = fonts.ExtractName( + fontfile, fonts.NAME_COPYRIGHT, "???." 
+ ).strip() + + font_metadata = metadata.fonts.add() + font_metadata.name = family + font_metadata.style = style + font_metadata.weight = weight + font_metadata.filename = filename + font_metadata.post_script_name = font_psname + default_fullname = os.path.splitext(filename)[0].replace("-", " ") + font_metadata.full_name = fonts.ExtractName( + fontfile, fonts.NAME_FULLNAME, default_fullname + ) + font_metadata.copyright = font_copyright + + axes_info_from_font_files = {_AxisInfo(f.file) for f in file_family_style_weights} + if len(axes_info_from_font_files) != 1: + raise RuntimeError("Variable axes info not matching between font files") + + for axes_info in axes_info_from_font_files: + if axes_info: + for axes in axes_info: + var_axes = metadata.axes.add() + var_axes.tag = axes[0] + var_axes.min_value = axes[1] + var_axes.max_value = axes[2] + + registry_overrides = _RegistryOverrides(axes_info_from_font_files) + if registry_overrides: + for k, v in registry_overrides.items(): + metadata.registry_default_overrides[k] = v + return metadata def _RegistryOverrides(axes_info): - """Get registry default value overrides for family axes. - - Args: - axes_info: set of Variable axes info - - Returns: - A dict structured {axis_tag: font_axis_default_value} - """ - res = {} - for font in axes_info: - for axis_tag, min_val, max_val, dflt_val in font: - if axis_tag not in axis_registry: - continue - default_val = axis_registry[axis_tag].default_value - if default_val >= min_val and default_val <= max_val: - continue - if axis_tag not in res: - res[axis_tag] = dflt_val - else: - res[axis_tag] = min(res[axis_tag], dflt_val) - return res + """Get registry default value overrides for family axes. + + Args: + axes_info: set of Variable axes info + + Returns: + A dict structured {axis_tag: font_axis_default_value} + """ + res = {} + for font in axes_info: + for axis_tag, min_val, max_val, dflt_val in font: + if axis_tag not in axis_registry: + continue + default_val = axis_registry[axis_tag].default_value + if default_val >= min_val and default_val <= max_val: + continue + if axis_tag not in res: + res[axis_tag] = dflt_val + else: + res[axis_tag] = min(res[axis_tag], dflt_val) + return res def _AxisInfo(fontfile): - """Gets variable axes info. + """Gets variable axes info. - Args: - fontfile: Font file to look at for variation info + Args: + fontfile: Font file to look at for variation info - Returns: - Variable axes info - """ - with contextlib.closing(ttLib.TTFont(fontfile)) as font: - if 'fvar' not in font: - return frozenset() - else: - fvar = font['fvar'] - axis_info = [ - (a.axisTag, a.minValue, a.maxValue, a.defaultValue) for a in fvar.axes - ] - return tuple(sorted(axis_info)) + Returns: + Variable axes info + """ + with contextlib.closing(ttLib.TTFont(fontfile)) as font: + if "fvar" not in font: + return frozenset() + else: + fvar = font["fvar"] + axis_info = [ + (a.axisTag, a.minValue, a.maxValue, a.defaultValue) for a in fvar.axes + ] + return tuple(sorted(axis_info)) def _GetAvgSize(file_family_style_weights): - """Gets average file size of all font weights. + """Gets average file size of all font weights. - Returns: - average file size. + Returns: + average file size. - Args: - file_family_style_weights: List of fonts.FileFamilyStyleWeightTuple. - """ - total_size = 0 - for list_tuple in file_family_style_weights: - total_size += os.stat(list_tuple.file).st_size - return total_size / len(file_family_style_weights) + Args: + file_family_style_weights: List of fonts.FileFamilyStyleWeightTuple. 
+ """ + total_size = 0 + for list_tuple in file_family_style_weights: + total_size += os.stat(list_tuple.file).st_size + return total_size / len(file_family_style_weights) def _WriteTextFile(filename, text): - """Write text to file. - - Nop if file exists with that exact content. This allows running against files - that are in Piper and not marked for editing; you will get an error only if - something changed. + """Write text to file. - Args: - filename: The file to write. - text: The content to write to the file. - """ - if os.path.isfile(filename): - with open(filename, 'r') as f: - current = f.read() - if current == text: - print('No change to %s' % filename) - return - - with open(filename, 'w') as f: - f.write(text) - print('Wrote %s' % filename) + Nop if file exists with that exact content. This allows running against files + that are in Piper and not marked for editing; you will get an error only if + something changed. + Args: + filename: The file to write. + text: The content to write to the file. + """ + if os.path.isfile(filename): + with open(filename, "r") as f: + current = f.read() + if current == text: + print("No change to %s" % filename) + return + with open(filename, "w") as f: + f.write(text) + print("Wrote %s" % filename) def _AddHumanReadableDateComment(text_proto): - return re.sub(r'(date_added: \d+)', - r'\1 # ' + time.strftime('%Y-%m-%d'), text_proto) + return re.sub( + r"(date_added: \d+)", r"\1 # " + time.strftime("%Y-%m-%d"), text_proto + ) def main(args=None): - args = parser.parse_args(args) - is_new = True - fontdir = args.directory - old_metadata_file = os.path.join(fontdir, 'METADATA.pb') - if os.path.isfile(old_metadata_file): - is_new = False - - language_comments = fonts.LanguageComments( - LoadLanguages(base_dir=args.lang) - ) - metadata = _MakeMetadata(args, is_new) - fonts.WriteMetadata(metadata, os.path.join(fontdir, 'METADATA.pb'), comments=language_comments) - - desc = os.path.join(fontdir, 'DESCRIPTION.en_us.html') - articledir = os.path.join(fontdir, "article") - article = os.path.join(articledir, "ARTICLE.en_us.html") - if os.path.isfile(desc): - print('DESCRIPTION.en_us.html exists') - elif os.path.isfile(article): - print("ARTICLE.en_us.html exists") - else: - os.makedirs(os.path.join(fontdir, "article")) - desc_text = "N/A" - if args.github_url: - human_url = remove_url_prefix(args.github_url) - desc_text += f'\n
<p>To contribute, please see <a href="{args.github_url}">{human_url}</a>.</p>
' - _WriteTextFile(article, desc_text) - - -if __name__ == '__main__': + args = parser.parse_args(args) + is_new = True + fontdir = args.directory + old_metadata_file = os.path.join(fontdir, "METADATA.pb") + if os.path.isfile(old_metadata_file): + is_new = False + + language_comments = fonts.LanguageComments(LoadLanguages(base_dir=args.lang)) + metadata = _MakeMetadata(args, is_new) + fonts.WriteMetadata( + metadata, os.path.join(fontdir, "METADATA.pb"), comments=language_comments + ) + + desc = os.path.join(fontdir, "DESCRIPTION.en_us.html") + articledir = os.path.join(fontdir, "article") + article = os.path.join(articledir, "ARTICLE.en_us.html") + if os.path.isfile(desc): + print("DESCRIPTION.en_us.html exists") + elif os.path.isfile(article): + print("ARTICLE.en_us.html exists") + else: + os.makedirs(os.path.join(fontdir, "article")) + desc_text = "N/A" + if args.github_url: + human_url = remove_url_prefix(args.github_url) + desc_text += f'\n
<p>To contribute, please see <a href="{args.github_url}">{human_url}</a>.</p>
' + _WriteTextFile(article, desc_text) + + +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/autohint.py b/Lib/gftools/scripts/autohint.py index 835660bf..8c56625a 100755 --- a/Lib/gftools/scripts/autohint.py +++ b/Lib/gftools/scripts/autohint.py @@ -37,6 +37,7 @@ from gftools.builder.autohint import autohint + def main(args=None): parser = argparse.ArgumentParser( description=("Automatically hint a TrueType font"), @@ -58,7 +59,9 @@ def main(args=None): action="store_true", help="When determining the script, ignore Latin glyphs", ) - parser.add_argument("--args", help="Any additional arguments to pass to ttfautohint") + parser.add_argument( + "--args", help="Any additional arguments to pass to ttfautohint" + ) parser.add_argument( "--output", "-o", @@ -66,7 +69,6 @@ def main(args=None): ) parser.add_argument("input", metavar="FONT", help="Font to hint") - args = parser.parse_args(args) if not args.output: diff --git a/Lib/gftools/scripts/build_ofl.py b/Lib/gftools/scripts/build_ofl.py index 77911992..e073df2b 100755 --- a/Lib/gftools/scripts/build_ofl.py +++ b/Lib/gftools/scripts/build_ofl.py @@ -37,4 +37,4 @@ def main(args=None): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/Lib/gftools/scripts/check_bbox.py b/Lib/gftools/scripts/check_bbox.py index 3fabc582..d83749a3 100755 --- a/Lib/gftools/scripts/check_bbox.py +++ b/Lib/gftools/scripts/check_bbox.py @@ -24,27 +24,39 @@ Users can either check a collection of fonts bounding boxes (--family) or the bounding box for each glyph in the collection of fonts (--glyphs). """ -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter import csv import sys from fontTools.ttLib import TTFont import tabulate -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fonts', - nargs='+', - help='Fonts in OpenType (TTF/OTF) format') -parser.add_argument('--csv', default=False, action='store_true', - help='Output data in comma-separated-values format') -parser.add_argument('--extremes', default=False, action='store_true', - help='Print extremes coordinates for each category') + +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fonts", nargs="+", help="Fonts in OpenType (TTF/OTF) format") +parser.add_argument( + "--csv", + default=False, + action="store_true", + help="Output data in comma-separated-values format", +) +parser.add_argument( + "--extremes", + default=False, + action="store_true", + help="Print extremes coordinates for each category", +) group = parser.add_mutually_exclusive_group(required=True) -group.add_argument('--glyphs', default=False, action='store_true', - help=('Return the bounds for glyphs' - ' in a collection of fonts')) -group.add_argument('--family', default=False, action="store_true", - help='Return the bounds for a family of fonts') +group.add_argument( + "--glyphs", + default=False, + action="store_true", + help=("Return the bounds for glyphs" " in a collection of fonts"), +) +group.add_argument( + "--family", + default=False, + action="store_true", + help="Return the bounds for a family of fonts", +) def printInfo(rows, save=False): @@ -85,39 +97,44 @@ def main(args=None): font_path = font font = TTFont(font_path) if args.glyphs: - for g_name in font['glyf'].glyphs: - glyph = font['glyf'][g_name] + for g_name in font["glyf"].glyphs: + glyph = font["glyf"][g_name] try: - rows.append([ - ("Font", 
font_path), - ("Glyph", g_name), - ("xMin", glyph.xMin), - ("yMin", glyph.yMin), - ("xMax", glyph.xMax), - ("yMax", glyph.yMax) - ]) + rows.append( + [ + ("Font", font_path), + ("Glyph", g_name), + ("xMin", glyph.xMin), + ("yMin", glyph.yMin), + ("xMax", glyph.xMax), + ("yMax", glyph.yMax), + ] + ) except AttributeError: # glyphs without paths or components don't have # yMin, yMax etc - rows.append([ - ("Font", font_path), - ("Glyph", g_name), - ("xMin", 0), - ("yMin", 0), - ("xMax", 0), - ("yMax", 0) - ]) + rows.append( + [ + ("Font", font_path), + ("Glyph", g_name), + ("xMin", 0), + ("yMin", 0), + ("xMax", 0), + ("yMax", 0), + ] + ) pass - elif args.family: - rows.append([ - ("Font", font_path), - ("xMin", font['head'].xMin), - ("yMin", font['head'].yMin), - ("xMax", font['head'].xMax), - ("yMax", font['head'].yMax) - ]) + rows.append( + [ + ("Font", font_path), + ("xMin", font["head"].xMin), + ("yMin", font["head"].yMin), + ("xMax", font["head"].xMax), + ("yMax", font["head"].yMax), + ] + ) if args.extremes: rows = find_extremes(rows) @@ -127,5 +144,6 @@ def main(args=None): else: printInfo(rows) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/check_category.py b/Lib/gftools/scripts/check_category.py index eebb2d4a..f68a497c 100755 --- a/Lib/gftools/scripts/check_category.py +++ b/Lib/gftools/scripts/check_category.py @@ -24,32 +24,37 @@ from gftools.fonts_public_pb2 import FamilyProto from google.protobuf import text_format -description = ("Comparison of category fields of local METADATA.pb files" - " with data corresponding metadata on the Google Fonts Developer API.\n\n" - " In order to use it you need to provide an API key.") +description = ( + "Comparison of category fields of local METADATA.pb files" + " with data corresponding metadata on the Google Fonts Developer API.\n\n" + " In order to use it you need to provide an API key." +) parser = argparse.ArgumentParser(description=description) -parser.add_argument('key', help='Key from Google Fonts Developer API') -parser.add_argument('repo', - help=('Directory tree that contains' - ' directories with METADATA.pb files.')) -parser.add_argument('--verbose', - help='Print additional information', - action="store_true") +parser.add_argument("key", help="Key from Google Fonts Developer API") +parser.add_argument( + "repo", help=("Directory tree that contains" " directories with METADATA.pb files.") +) +parser.add_argument( + "--verbose", help="Print additional information", action="store_true" +) + + +API_URL = "https://www.googleapis.com/webfonts/v1/webfonts?key={}" -API_URL = 'https://www.googleapis.com/webfonts/v1/webfonts?key={}' def main(args=None): args = parser.parse_args(args) response = requests.get(API_URL.format(args.key)) try: - webfontList = response.json()['items'] - webfontListFamilyNames = [item['family'] for item in webfontList] + webfontList = response.json()["items"] + webfontListFamilyNames = [item["family"] for item in webfontList] except (ValueError, KeyError): - sys.exit("Unable to load and parse" - " list of families from Google Web Fonts API.") + sys.exit( + "Unable to load and parse" " list of families from Google Web Fonts API." 
+ ) for dirpath, dirnames, filenames in os.walk(args.repo): - metadata_path = os.path.join(dirpath, 'METADATA.pb') + metadata_path = os.path.join(dirpath, "METADATA.pb") if not os.path.exists(metadata_path): continue @@ -59,9 +64,12 @@ def main(args=None): try: family = metadata.name except KeyError: - print(('ERROR: "{}" does not contain' - ' familyname info.').format(metadata_path), - file=sys.stderr) + print( + ('ERROR: "{}" does not contain' " familyname info.").format( + metadata_path + ), + file=sys.stderr, + ) continue try: @@ -69,8 +77,12 @@ def main(args=None): webfontsItem = webfontList[index] except ValueError: if args.verbose: - print(('ERROR: Family "{}" could not be found' - ' in Google Web Fonts API.').format(family)) + print( + ( + 'ERROR: Family "{}" could not be found' + " in Google Web Fonts API." + ).format(family) + ) continue if metadata.category == "SANS_SERIF": # That's fine :-) @@ -78,18 +90,22 @@ def main(args=None): else: category = metadata.category.lower() - if category != webfontsItem['category']: - print(('ERROR: "{}" category "{}" in git' - ' does not match category "{}"' - ' in API.').format(family, - metadata.category, - webfontsItem['category'])) + if category != webfontsItem["category"]: + print( + ( + 'ERROR: "{}" category "{}" in git' + ' does not match category "{}"' + " in API." + ).format(family, metadata.category, webfontsItem["category"]) + ) else: if args.verbose: - print(('OK: "{}" ' - 'category "{}" in sync.').format(family, - metadata.category)) + print( + ('OK: "{}" ' 'category "{}" in sync.').format( + family, metadata.category + ) + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/check_copyright_notices.py b/Lib/gftools/scripts/check_copyright_notices.py index b61ee274..11b581a6 100755 --- a/Lib/gftools/scripts/check_copyright_notices.py +++ b/Lib/gftools/scripts/check_copyright_notices.py @@ -18,15 +18,16 @@ import os import tabulate from fontTools import ttLib -from gftools.constants import (NAMEID_COPYRIGHT_NOTICE, - PLATID_STR) +from gftools.constants import NAMEID_COPYRIGHT_NOTICE, PLATID_STR -parser = argparse.ArgumentParser(description='Print out copyright' - ' nameIDs strings') -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true', - help="Output data in comma-separate-values" - " (CSV) file format") +parser = argparse.ArgumentParser(description="Print out copyright" " nameIDs strings") +parser.add_argument("font", nargs="+") +parser.add_argument( + "--csv", + default=False, + action="store_true", + help="Output data in comma-separate-values" " (CSV) file format", +) def main(args=None): @@ -35,27 +36,31 @@ def main(args=None): rows = [] for font in args.font: ttfont = ttLib.TTFont(font) - for name in ttfont['name'].names: + for name in ttfont["name"].names: if name.nameID != NAMEID_COPYRIGHT_NOTICE: continue - value = name.string.decode(name.getEncoding()) or '' - rows.append([os.path.basename(font), - value, - len(value), - "{} ({})".format( - name.platformID, - PLATID_STR.get(name.platformID, "?"))]) + value = name.string.decode(name.getEncoding()) or "" + rows.append( + [ + os.path.basename(font), + value, + len(value), + "{} ({})".format( + name.platformID, PLATID_STR.get(name.platformID, "?") + ), + ] + ) - header = ['filename', 'copyright notice', 'char length', 'platformID'] + header = ["filename", "copyright notice", "char length", "platformID"] def as_csv(rows): import csv import sys - writer = 
csv.writer(sys.stdout, - delimiter='|', - quotechar='"', - quoting=csv.QUOTE_MINIMAL) + + writer = csv.writer( + sys.stdout, delimiter="|", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) writer.writerows([header]) writer.writerows(rows) sys.exit(0) @@ -63,13 +68,14 @@ def as_csv(rows): if args.csv: as_csv(rows) - print("") #some spacing + print("") # some spacing print(tabulate.tabulate(rows, header, tablefmt="pipe")) - print("") #some spacing + print("") # some spacing + -if __name__ == '__main__': - """ Example usage: +if __name__ == "__main__": + """Example usage: - gftools check-copyright-notices ~/fonts/*/*/*ttf --csv > ~/notices.txt; - """ - main() + gftools check-copyright-notices ~/fonts/*/*/*ttf --csv > ~/notices.txt; + """ + main() diff --git a/Lib/gftools/scripts/check_font_version.py b/Lib/gftools/scripts/check_font_version.py index 7927fa92..066600ca 100755 --- a/Lib/gftools/scripts/check_font_version.py +++ b/Lib/gftools/scripts/check_font_version.py @@ -23,75 +23,74 @@ from ntpath import basename from zipfile import ZipFile from gftools.utils import ( - download_family_from_Google_Fonts, - download_file, - fonts_from_zip + download_family_from_Google_Fonts, + download_file, + fonts_from_zip, ) + def parse_version_head(fonts): - """Return a family's version number. Ideally, each font in the - family should have the same version number. If not, return the highest - version number.""" - versions = [] - if isinstance(fonts, list): - for font in fonts: - versions.append(float(font['head'].fontRevision)) - else: - versions.append(float(fonts['head'].fontRevision)) - return max(versions) + """Return a family's version number. Ideally, each font in the + family should have the same version number. If not, return the highest + version number.""" + versions = [] + if isinstance(fonts, list): + for font in fonts: + versions.append(float(font["head"].fontRevision)) + else: + versions.append(float(fonts["head"].fontRevision)) + return max(versions) -def main(args=None): - parser = ArgumentParser(description=__doc__) - parser.add_argument('family', - help='Name of font family') - parser.add_argument('-wc', '--web-compare', - help='Compare against a web url .zip family') - parser.add_argument('-lc', '--local-compare', nargs='+', - help='Compare against a set of local ttfs') - args = parser.parse_args(args) - google_family = download_family_from_Google_Fonts(args.family) - google_family_fonts = [TTFont(f) for f in google_family] - google_family_version = parse_version_head(google_family_fonts) +def main(args=None): + parser = ArgumentParser(description=__doc__) + parser.add_argument("family", help="Name of font family") + parser.add_argument( + "-wc", "--web-compare", help="Compare against a web url .zip family" + ) + parser.add_argument( + "-lc", "--local-compare", nargs="+", help="Compare against a set of local ttfs" + ) + args = parser.parse_args(args) + google_family = download_family_from_Google_Fonts(args.family) + google_family_fonts = [TTFont(f) for f in google_family] + google_family_version = parse_version_head(google_family_fonts) - if args.web_compare: - if args.web_compare.endswith('.zip'): - web_family_zip = ZipFile(download_file(args.web_compare)) - web_family = fonts_from_zip(web_family_zip) - web_family_fonts = [TTFont(f) for f in web_family - if f.name.endswith(".ttf")] - web_family_name = set(f.reader.file.name.split('-')[0] for f in web_family) - web_family_version = parse_version_head(web_family_fonts) - print('Google Fonts Version of %s is v%s' % ( - args.family, - 
google_family_version - )) - print('Web Version of %s is v%s' % ( - ', '.join(web_family_name), - web_family_version - )) + if args.web_compare: + if args.web_compare.endswith(".zip"): + web_family_zip = ZipFile(download_file(args.web_compare)) + web_family = fonts_from_zip(web_family_zip) + web_family_fonts = [ + TTFont(f) for f in web_family if f.name.endswith(".ttf") + ] + web_family_name = set(f.reader.file.name.split("-")[0] for f in web_family) + web_family_version = parse_version_head(web_family_fonts) + print( + "Google Fonts Version of %s is v%s" % (args.family, google_family_version) + ) + print( + "Web Version of %s is v%s" + % (", ".join(web_family_name), web_family_version) + ) - elif args.local_compare: - local_family = [TTFont(f) for f in args.local_compare] - local_family_version = parse_version_head(local_family) - local_fonts_name = set(basename(f.split('-')[0]) for f in - args.local_compare) - print('Google Fonts Version of %s is v%s' % ( - args.family, - google_family_version - )) - print('Local Version of %s is v%s' % ( - ','.join(local_fonts_name), - local_family_version - )) + elif args.local_compare: + local_family = [TTFont(f) for f in args.local_compare] + local_family_version = parse_version_head(local_family) + local_fonts_name = set(basename(f.split("-")[0]) for f in args.local_compare) + print( + "Google Fonts Version of %s is v%s" % (args.family, google_family_version) + ) + print( + "Local Version of %s is v%s" + % (",".join(local_fonts_name), local_family_version) + ) - else: - print('Google Fonts Version of %s is v%s' % ( - args.family, - google_family_version - )) + else: + print( + "Google Fonts Version of %s is v%s" % (args.family, google_family_version) + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/check_name.py b/Lib/gftools/scripts/check_name.py index cb888fca..ef72adca 100755 --- a/Lib/gftools/scripts/check_name.py +++ b/Lib/gftools/scripts/check_name.py @@ -27,59 +27,57 @@ Output in csv format gftools check-name [fonts] --csv """ -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter import csv import sys from fontTools.ttLib import TTFont import tabulate import ntpath -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fonts', - nargs="+", - help="Fonts in OpenType (TTF/OTF) format") -parser.add_argument('--csv', default=False, action='store_true') +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fonts", nargs="+", help="Fonts in OpenType (TTF/OTF) format") +parser.add_argument("--csv", default=False, action="store_true") def printInfo(rows, save=False): - header = [r[0] for r in rows[0]] - t = [] - for row in rows: - t.append([r[1] for r in row]) + header = [r[0] for r in rows[0]] + t = [] + for row in rows: + t.append([r[1] for r in row]) - if save: - writer = csv.writer(sys.stdout) - writer.writerows([header]) - writer.writerows(t) - sys.exit(0) - else: - print(tabulate.tabulate(t, header, tablefmt="plain")) + if save: + writer = csv.writer(sys.stdout) + writer.writerows([header]) + writer.writerows(t) + sys.exit(0) + else: + print(tabulate.tabulate(t, header, tablefmt="plain")) def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - rows = [] - for font_filename in args.fonts: - font = TTFont(font_filename) - for field in font['name'].names: - enc = field.getEncoding() - 
rows.append([ - ('Font', ntpath.basename(font_filename)), - ('platformID', field.platformID), - ('encodingID', field.platEncID), - ('languageID', field.langID), - ('nameID', field.nameID), - ('nameString', field.toUnicode()), - ]) + rows = [] + for font_filename in args.fonts: + font = TTFont(font_filename) + for field in font["name"].names: + enc = field.getEncoding() + rows.append( + [ + ("Font", ntpath.basename(font_filename)), + ("platformID", field.platformID), + ("encodingID", field.platEncID), + ("languageID", field.langID), + ("nameID", field.nameID), + ("nameString", field.toUnicode()), + ] + ) - if args.csv: - printInfo(rows, save=True) - else: - printInfo(rows) + if args.csv: + printInfo(rows, save=True) + else: + printInfo(rows) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/check_sandbox_family.py b/Lib/gftools/scripts/check_sandbox_family.py index 51f77631..74679c7e 100755 --- a/Lib/gftools/scripts/check_sandbox_family.py +++ b/Lib/gftools/scripts/check_sandbox_family.py @@ -10,7 +10,7 @@ import os import sys from selenium import webdriver -from selenium.webdriver.chrome.options import Options +from selenium.webdriver.chrome.options import Options from PIL import Image, ImageDraw, ImageFont import argparse from urllib.parse import urlsplit @@ -33,8 +33,8 @@ def get_font_for_os(): if sys.platform.startswith("linux"): return os.path.join( - "usr", "share", "font", "truetype", "noto" - "NotoMono-Regular.ttf") + "usr", "share", "font", "truetype", "noto" "NotoMono-Regular.ttf" + ) elif sys.platform.startswith("darwin"): return os.path.join("Library", "Fonts", "Arial.ttf") elif sys.platform.startswith("win"): @@ -42,13 +42,12 @@ def get_font_for_os(): else: raise NotImplementedError("Please use OSX, Ubuntu or Win") + def main(args=None): parser = argparse.ArgumentParser() parser.add_argument("url") - parser.add_argument("-o", "--out", - help="Gif out path e.g ~/Desktop/site1.gif") - parser.add_argument("-l", "--limit", type=int, - help="limit diff height") + parser.add_argument("-o", "--out", help="Gif out path e.g ~/Desktop/site1.gif") + parser.add_argument("-l", "--limit", type=int, help="limit diff height") args = parser.parse_args(args) chrome_options = Options() @@ -56,20 +55,27 @@ def main(args=None): with webdriver.Chrome(options=chrome_options) as driver: driver.get(args.url) - required_height = driver.execute_script('return document.body.parentNode.scrollHeight') + required_height = driver.execute_script( + "return document.body.parentNode.scrollHeight" + ) if args.limit and required_height > args.limit: required_height = args.limit driver.set_window_size(WIDTH, required_height) try: families_in_use = driver.find_elements_by_xpath( - '//link[contains(@href, "fonts.googleapis.com/css")]' + '//link[contains(@href, "fonts.googleapis.com/css")]' ) for family in families_in_use: - print("Changing GF url %s to %s" % ( - family.get_attribute("href"), family.get_attribute("href").replace( - "fonts.googleapis.com", "fonts.sandbox.google.com") - )) + print( + "Changing GF url %s to %s" + % ( + family.get_attribute("href"), + family.get_attribute("href").replace( + "fonts.googleapis.com", "fonts.sandbox.google.com" + ), + ) + ) except: raise Exception("No hosted GF families found on %s" % args.url) @@ -84,26 +90,25 @@ def main(args=None): else: gif_path = urlsplit(args.url).netloc + ".gif" - with Image.open(BytesIO(before_img)) as before, Image.open(BytesIO(after_img)) as after: + with Image.open(BytesIO(before_img)) as 
before, Image.open( + BytesIO(after_img) + ) as after: font_path = get_font_for_os() font = ImageFont.truetype(font_path, 32) before_draw = ImageDraw.Draw(before) before_draw.rectangle((0, 0, WIDTH, 50), fill=(0, 0, 0)) - before_draw.text((10, 10), "Production", - (255, 0, 0), font=font) + before_draw.text((10, 10), "Production", (255, 0, 0), font=font) after_draw = ImageDraw.Draw(after) after_draw.rectangle((0, 0, WIDTH, 50), fill=(0, 0, 0)) - after_draw.text((10, 10), "Sandbox", - (255, 0, 0), font=font) + after_draw.text((10, 10), "Sandbox", (255, 0, 0), font=font) before.save( - gif_path, - save_all=True, - append_images=[after], - loop=10000, - duration=1000, + gif_path, + save_all=True, + append_images=[after], + loop=10000, + duration=1000, ) if __name__ == "__main__": main() - diff --git a/Lib/gftools/scripts/check_vtt_compatibility.py b/Lib/gftools/scripts/check_vtt_compatibility.py index a6415e55..785930a4 100755 --- a/Lib/gftools/scripts/check_vtt_compatibility.py +++ b/Lib/gftools/scripts/check_vtt_compatibility.py @@ -20,99 +20,101 @@ Check a hinted font will successfully transfer vtt instructions to an unhinted font. """ -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter from fontTools.ttLib import TTFont import logging def font_glyphs(font): - '''return a dict of glyphs objects for font + """return a dict of glyphs objects for font - {'a': , 'b': }''' - return {g: font['glyf'][g] for g in font['glyf'].glyphs} + {'a': , 'b': }""" + return {g: font["glyf"][g] for g in font["glyf"].glyphs} def glyphs_points(font): - '''return a dict of glyphs coordinates/composites for each font - - {'a': [(0,0), (10,10)], 'b': [(10,10, (20,20))]}, - ''' - res = {} - for glyph in font: - if hasattr(font[glyph], 'coordinates'): - res[glyph] = font[glyph].coordinates - elif font[glyph].isComposite(): - res[glyph] = [c.glyphName for c in font[glyph].components] - else: - res[glyph] = None - return res + """return a dict of glyphs coordinates/composites for each font + + {'a': [(0,0), (10,10)], 'b': [(10,10, (20,20))]}, + """ + res = {} + for glyph in font: + if hasattr(font[glyph], "coordinates"): + res[glyph] = font[glyph].coordinates + elif font[glyph].isComposite(): + res[glyph] = [c.glyphName for c in font[glyph].components] + else: + res[glyph] = None + return res def compare_glyph_count(font1, name1, name2): - if font1: - logging.warning('%s missing glyphs against %s:\n%s' % ( - name1, - name2, - ', '.join(font1) - )) - else: - logging.info('%s %s glyphs match' % (name1, name2)) - - -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('hinted', help='Hinted font') -parser.add_argument('unhinted', help='Unhinted font') -parser.add_argument('--count', action="store_true", default=True, - help="Check fonts have the same glyph count") -parser.add_argument('--compatible', action="store_true", default=True, - help="Check glyphs share same coordinates and composites") + if font1: + logging.warning( + "%s missing glyphs against %s:\n%s" % (name1, name2, ", ".join(font1)) + ) + else: + logging.info("%s %s glyphs match" % (name1, name2)) + + +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("hinted", help="Hinted font") +parser.add_argument("unhinted", help="Unhinted font") +parser.add_argument( + "--count", + action="store_true", + default=True, + help="Check fonts have the same glyph count", +) +parser.add_argument( + 
"--compatible", + action="store_true", + default=True, + help="Check glyphs share same coordinates and composites", +) logging.getLogger().setLevel(logging.DEBUG) def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - hinted = TTFont(args.hinted) - unhinted = TTFont(args.unhinted) + hinted = TTFont(args.hinted) + unhinted = TTFont(args.unhinted) - hinted_glyphs = font_glyphs(hinted) - unhinted_glyphs = font_glyphs(unhinted) + hinted_glyphs = font_glyphs(hinted) + unhinted_glyphs = font_glyphs(unhinted) - if args.count: - logging.debug('Comparing glyph counts:') + if args.count: + logging.debug("Comparing glyph counts:") - hinted_missing = set(unhinted_glyphs.keys()) - set(hinted_glyphs.keys()) - unhinted_missing = set(hinted_glyphs.keys()) - set(unhinted_glyphs.keys()) + hinted_missing = set(unhinted_glyphs.keys()) - set(hinted_glyphs.keys()) + unhinted_missing = set(hinted_glyphs.keys()) - set(unhinted_glyphs.keys()) - compare_glyph_count(hinted_missing, args.hinted, args.unhinted) - compare_glyph_count(unhinted_missing, args.unhinted, args.hinted) + compare_glyph_count(hinted_missing, args.hinted, args.unhinted) + compare_glyph_count(unhinted_missing, args.unhinted, args.hinted) - if args.compatible: - logging.debug('Check glyph structures match') + if args.compatible: + logging.debug("Check glyph structures match") - hinted_glyph_points = glyphs_points(hinted_glyphs) - unhinted_glyph_points = glyphs_points(unhinted_glyphs) + hinted_glyph_points = glyphs_points(hinted_glyphs) + unhinted_glyph_points = glyphs_points(unhinted_glyphs) - shared_glyphs = set(unhinted_glyphs) & set(hinted_glyphs.keys()) + shared_glyphs = set(unhinted_glyphs) & set(hinted_glyphs.keys()) - incompatible_glyphs = [] - for glyph in shared_glyphs: - if unhinted_glyph_points[glyph] != hinted_glyph_points[glyph]: - incompatible_glyphs.append(glyph) + incompatible_glyphs = [] + for glyph in shared_glyphs: + if unhinted_glyph_points[glyph] != hinted_glyph_points[glyph]: + incompatible_glyphs.append(glyph) - if incompatible_glyphs: - logging.warning('Incompatible glyphs between %s & %s:\n%s' % ( - args.hinted, - args.unhinted, - ', '.join(incompatible_glyphs) - ) - ) - else: - logging.info('Glyph sets are compatible') + if incompatible_glyphs: + logging.warning( + "Incompatible glyphs between %s & %s:\n%s" + % (args.hinted, args.unhinted, ", ".join(incompatible_glyphs)) + ) + else: + logging.info("Glyph sets are compatible") -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/check_vtt_compile.py b/Lib/gftools/scripts/check_vtt_compile.py index 76143c43..6454ad4d 100755 --- a/Lib/gftools/scripts/check_vtt_compile.py +++ b/Lib/gftools/scripts/check_vtt_compile.py @@ -4,6 +4,7 @@ from fontTools.ttLib import TTFont import argparse + def main(args=None): parser = argparse.ArgumentParser() parser.add_argument("font_path") @@ -39,12 +40,11 @@ def main(args=None): return if args.remove_incompatible_hinting: for _, glyph_name in incompatible_glyphs: - font['TSI1'].glyphPrograms[glyph_name] = "" - font['TSI3'].glyphPrograms[glyph_name] = "" + font["TSI1"].glyphPrograms[glyph_name] = "" + font["TSI3"].glyphPrograms[glyph_name] = "" font.save(args.font_path + ".fix") print("Incompatible glyph hints have been removed") - + if __name__ == "__main__": main() - diff --git a/Lib/gftools/scripts/compare_font.py b/Lib/gftools/scripts/compare_font.py index e2736225..1bb9187b 100755 --- a/Lib/gftools/scripts/compare_font.py +++ 
b/Lib/gftools/scripts/compare_font.py @@ -72,156 +72,209 @@ from gfsubsets import CodepointsInFont, CodepointsInSubset -parser = argparse.ArgumentParser(description='Compare size and coverage of two fonts') -parser.add_argument('first_font') -parser.add_argument('second_font') -parser.add_argument('--nodiff_tables', dest="diff_tables", action="store_false", help='Whether to print table size diffs') -parser.add_argument('--nodiff_coverage', dest="diff_coverage", action="store_false", help='Whether to print coverage diffs') - - -_KNOWN_TABLES = ('BASE', 'CFF ', 'DSIG', 'GDEF', 'GPOS', 'GSUB', 'LTSH', - 'OS/2', 'VORG', 'cmap', 'cvt ', 'fpgm', 'gasp', 'glyf', 'hdmx', - 'head', 'hhea', 'hmtx', 'loca', 'maxp', 'name', 'post', 'prep', - 'FFTM', 'kern', 'vhea', 'vmtx') +parser = argparse.ArgumentParser(description="Compare size and coverage of two fonts") +parser.add_argument("first_font") +parser.add_argument("second_font") +parser.add_argument( + "--nodiff_tables", + dest="diff_tables", + action="store_false", + help="Whether to print table size diffs", +) +parser.add_argument( + "--nodiff_coverage", + dest="diff_coverage", + action="store_false", + help="Whether to print coverage diffs", +) + + +_KNOWN_TABLES = ( + "BASE", + "CFF ", + "DSIG", + "GDEF", + "GPOS", + "GSUB", + "LTSH", + "OS/2", + "VORG", + "cmap", + "cvt ", + "fpgm", + "gasp", + "glyf", + "hdmx", + "head", + "hhea", + "hmtx", + "loca", + "maxp", + "name", + "post", + "prep", + "FFTM", + "kern", + "vhea", + "vmtx", +) def CompareSize(font_filename1, font_filename2, args): - """Prints a size comparison for two fonts. - - If so flagged (--diff_tables), prints per-table size change. - - Args: - font_filename1: The first font to compare. - font_filename2: The second font to compare. - Returns: - String describing size differences. - Raises: - OSError: If either argument doesn't point to a file. errno.ENOENT. - """ - if not (os.path.isfile(font_filename1) and os.path.isfile(font_filename2)): - raise OSError(errno.ENOENT, 'Missing at least one of %s and %s' % ( - os.path.basename(font_filename1), os.path.basename(font_filename2))) - - font_sz1 = os.stat(font_filename1).st_size - font_sz2 = os.stat(font_filename2).st_size - result = '%s (%d) vs %s (%d) (%+d)\n' % ( - os.path.basename(font_filename1), font_sz1, - os.path.basename(font_filename2), font_sz2, font_sz2 - font_sz1) - - if args.diff_tables: - result += DiffTables(font_filename1, font_filename2) - - return result + """Prints a size comparison for two fonts. + + If so flagged (--diff_tables), prints per-table size change. + + Args: + font_filename1: The first font to compare. + font_filename2: The second font to compare. + Returns: + String describing size differences. + Raises: + OSError: If either argument doesn't point to a file. errno.ENOENT. + """ + if not (os.path.isfile(font_filename1) and os.path.isfile(font_filename2)): + raise OSError( + errno.ENOENT, + "Missing at least one of %s and %s" + % (os.path.basename(font_filename1), os.path.basename(font_filename2)), + ) + + font_sz1 = os.stat(font_filename1).st_size + font_sz2 = os.stat(font_filename2).st_size + result = "%s (%d) vs %s (%d) (%+d)\n" % ( + os.path.basename(font_filename1), + font_sz1, + os.path.basename(font_filename2), + font_sz2, + font_sz2 - font_sz1, + ) + + if args.diff_tables: + result += DiffTables(font_filename1, font_filename2) + + return result def DiffTables(font_filename1, font_filename2): - """Prints a table-by-table size comparison of two fonts. 
- - Args: - font_filename1: The first font to compare. - font_filename2: The second font to compare. - Returns: - String describing size difference. One line per unique table in either font. - """ - result = [' Table Changes Delta-Bytes(from=>to) % Change'] - result.append(' -------------------------------------------------') - sfnt1 = sfnt.SFNTReader(open(font_filename1, 'rb')) - sfnt2 = sfnt.SFNTReader(open(font_filename2, 'rb')) - - font_sz1 = os.stat(font_filename1).st_size - - sum_tables1 = 0 - sum_tables2 = 0 - - table_l1_l2s = [] - for t in fonts.UniqueSort(sfnt1.tables, sfnt2.tables, _KNOWN_TABLES): - table1_sz = sfnt1.tables[t].length if t in sfnt1 else 0 - table2_sz = sfnt2.tables[t].length if t in sfnt2 else 0 - sum_tables1 += table1_sz - sum_tables2 += table2_sz - table_l1_l2s.append((t, table1_sz, table2_sz)) - - for (table, table1_sz, table2_sz) in table_l1_l2s: - delta_pct = float(table2_sz - table1_sz) / font_sz1 * 100 - result.append(' %s %+6d %06d => %06d %+10.1f%%' % ( - table, table2_sz - table1_sz, table1_sz, table2_sz, delta_pct)) - - delta_pct = float(sum_tables2 - sum_tables1) / font_sz1 * 100 - result.append(' TOTAL %+6d %06d => %06d %+10.1f%%' % ( - sum_tables2 - sum_tables1, sum_tables1, sum_tables2, delta_pct)) - - return '\n'.join(result) + """Prints a table-by-table size comparison of two fonts. + + Args: + font_filename1: The first font to compare. + font_filename2: The second font to compare. + Returns: + String describing size difference. One line per unique table in either font. + """ + result = [" Table Changes Delta-Bytes(from=>to) % Change"] + result.append(" -------------------------------------------------") + sfnt1 = sfnt.SFNTReader(open(font_filename1, "rb")) + sfnt2 = sfnt.SFNTReader(open(font_filename2, "rb")) + + font_sz1 = os.stat(font_filename1).st_size + + sum_tables1 = 0 + sum_tables2 = 0 + + table_l1_l2s = [] + for t in fonts.UniqueSort(sfnt1.tables, sfnt2.tables, _KNOWN_TABLES): + table1_sz = sfnt1.tables[t].length if t in sfnt1 else 0 + table2_sz = sfnt2.tables[t].length if t in sfnt2 else 0 + sum_tables1 += table1_sz + sum_tables2 += table2_sz + table_l1_l2s.append((t, table1_sz, table2_sz)) + + for table, table1_sz, table2_sz in table_l1_l2s: + delta_pct = float(table2_sz - table1_sz) / font_sz1 * 100 + result.append( + " %s %+6d %06d => %06d %+10.1f%%" + % (table, table2_sz - table1_sz, table1_sz, table2_sz, delta_pct) + ) + + delta_pct = float(sum_tables2 - sum_tables1) / font_sz1 * 100 + result.append( + " TOTAL %+6d %06d => %06d %+10.1f%%" + % (sum_tables2 - sum_tables1, sum_tables1, sum_tables2, delta_pct) + ) + + return "\n".join(result) def DiffCoverage(font_filename1, font_filename2, subset): - """Prints a comparison of the coverage of a given subset by two fonts. - - Args: - font_filename1: The first font to compare. - font_filename2: The second font to compare. - subset: The lowercase name of the subset to compare coverage of. - """ - f1cps = CodepointsInFont(font_filename1) - f2cps = CodepointsInFont(font_filename2) - - if subset != 'all': - subset_cps = CodepointsInSubset(subset) - f1cps &= subset_cps - f2cps &= subset_cps - else: - subset_cps = None - - subset_cp_str = ('/%d' % len(subset_cps)) if subset_cps is not None else '' - - print(' %s %+d (%d%s => %d%s)' % ( - subset, len(f2cps) - len(f1cps), len(f1cps), subset_cp_str, len(f2cps), - subset_cp_str)) + """Prints a comparison of the coverage of a given subset by two fonts. + + Args: + font_filename1: The first font to compare. 
+ font_filename2: The second font to compare. + subset: The lowercase name of the subset to compare coverage of. + """ + f1cps = CodepointsInFont(font_filename1) + f2cps = CodepointsInFont(font_filename2) + + if subset != "all": + subset_cps = CodepointsInSubset(subset) + f1cps &= subset_cps + f2cps &= subset_cps + else: + subset_cps = None + + subset_cp_str = ("/%d" % len(subset_cps)) if subset_cps is not None else "" + + print( + " %s %+d (%d%s => %d%s)" + % ( + subset, + len(f2cps) - len(f1cps), + len(f1cps), + subset_cp_str, + len(f2cps), + subset_cp_str, + ) + ) def CompareDirs(font1, font2, args): - """Compares fonts by assuming font1/2 are dirs containing METADATA.pb.""" + """Compares fonts by assuming font1/2 are dirs containing METADATA.pb.""" - m1 = fonts.Metadata(font1) - m2 = fonts.Metadata(font2) + m1 = fonts.Metadata(font1) + m2 = fonts.Metadata(font2) - subsets_to_compare = fonts.UniqueSort(m1.subsets, m2.subsets) - subsets_to_compare.remove('menu') - subsets_to_compare.append('all') + subsets_to_compare = fonts.UniqueSort(m1.subsets, m2.subsets) + subsets_to_compare.remove("menu") + subsets_to_compare.append("all") - font_filename1 = os.path.join(font1, fonts.RegularWeight(m1)) - font_filename2 = os.path.join(font2, fonts.RegularWeight(m2)) + font_filename1 = os.path.join(font1, fonts.RegularWeight(m1)) + font_filename2 = os.path.join(font2, fonts.RegularWeight(m2)) - if args.ndiff_coverage: - print('Subset Coverage Change (codepoints)') - for subset in subsets_to_compare: - DiffCoverage(font_filename1, font_filename2, subset) + if args.ndiff_coverage: + print("Subset Coverage Change (codepoints)") + for subset in subsets_to_compare: + DiffCoverage(font_filename1, font_filename2, subset) - print(CompareSize(font_filename1, font_filename2, args)) + print(CompareSize(font_filename1, font_filename2, args)) def CompareFiles(font1, font2, args): - """Compares fonts assuming font1/2 are font files.""" - print(CompareSize(font1, font2, args)) + """Compares fonts assuming font1/2 are font files.""" + print(CompareSize(font1, font2, args)) def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - font1 = args.first_font - font2 = args.second_font - dirs = os.path.isdir(font1) and os.path.isdir(font2) - files = os.path.isfile(font1) and os.path.isfile(font2) - if not dirs and not files: - print('%s and %s must both point to directories or font files' % ( - font1, font2)) - sys.exit(1) + font1 = args.first_font + font2 = args.second_font + dirs = os.path.isdir(font1) and os.path.isdir(font2) + files = os.path.isfile(font1) and os.path.isfile(font2) + if not dirs and not files: + print("%s and %s must both point to directories or font files" % (font1, font2)) + sys.exit(1) - if dirs: - CompareDirs(font1, font2, args) + if dirs: + CompareDirs(font1, font2, args) - if files: - CompareFiles(font1, font2, args) + if files: + CompareFiles(font1, font2, args) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/family_html_snippet.py b/Lib/gftools/scripts/family_html_snippet.py index 08ad79bd..7f926876 100755 --- a/Lib/gftools/scripts/family_html_snippet.py +++ b/Lib/gftools/scripts/family_html_snippet.py @@ -43,167 +43,174 @@ import json import requests import sys -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter GF_API = "https://www.googleapis.com/webfonts/v1/webfonts?key={}" GF_API_WEIGHT_TO_CSS_WEIGHT = { - "100": "100", - 
"100italic": "100i", - "200": "200", - "200italic": "200i", - "300": "300", - "300italic": "300i", - "regular": "400", - "italic": "400i", - "500": "500", - "500italic": "500i", - "600": "600", - "600italic": "600i", - "700": "700", - "700italic": "700i", - "800": "800", - "800italic": "800i", - "900": "900", - "900italic": "900i" + "100": "100", + "100italic": "100i", + "200": "200", + "200italic": "200i", + "300": "300", + "300italic": "300i", + "regular": "400", + "italic": "400i", + "500": "500", + "500italic": "500i", + "600": "600", + "600italic": "600i", + "700": "700", + "700italic": "700i", + "800": "800", + "800italic": "800i", + "900": "900", + "900italic": "900i", } API_TO_CSS_STYLE_NAME = { - "100": "a", - "100i": "b", - "200": "c", - "200i": "d", - "300": "e", - "300i": "f", - "400": "g", - "400i": "h", - "500": "i", - "500i": "j", - "600": "k", - "600i": "l", - "700": "m", - "700i": "n", - "800": "o", - "800i": "p", - "900": "q", - "900i": "r", + "100": "a", + "100i": "b", + "200": "c", + "200i": "d", + "300": "e", + "300i": "f", + "400": "g", + "400i": "h", + "500": "i", + "500i": "j", + "600": "k", + "600i": "l", + "700": "m", + "700i": "n", + "800": "o", + "800i": "p", + "900": "q", + "900i": "r", } def get_gf_family(family, api_key): - """Get data of the given family hosted on Google Fonts""" - request = requests.get(GF_API.format(api_key)) - - try: - response = json.loads(request.text) - if "error" in response: - if response["error"]["errors"][0]["reason"] == "keyInvalid": - sys.exit(("The Google Fonts API key '{}'" - " was rejected as being invalid !").format(api_key)) - else: - sys.exit(("There were errors in the" - " Google Fonts API request:" - " {}").format(response["error"])) - else: - gf_families = response - except (ValueError, KeyError): - sys.exit("Unable to load and parse data from Google Web Fonts API.") - - for item in gf_families['items']: - if family == item['family']: - return item - return False + """Get data of the given family hosted on Google Fonts""" + request = requests.get(GF_API.format(api_key)) + + try: + response = json.loads(request.text) + if "error" in response: + if response["error"]["errors"][0]["reason"] == "keyInvalid": + sys.exit( + ( + "The Google Fonts API key '{}'" + " was rejected as being invalid !" 
+ ).format(api_key) + ) + else: + sys.exit( + ( + "There were errors in the" " Google Fonts API request:" " {}" + ).format(response["error"]) + ) + else: + gf_families = response + except (ValueError, KeyError): + sys.exit("Unable to load and parse data from Google Web Fonts API.") + + for item in gf_families["items"]: + if family == item["family"]: + return item + return False def get_family_styles(gf_family): - """Get all the styles of a family""" - styles = [] - if gf_family: - for var in gf_family['variants']: - styles.append((GF_API_WEIGHT_TO_CSS_WEIGHT[var])) - return styles + """Get all the styles of a family""" + styles = [] + if gf_family: + for var in gf_family["variants"]: + styles.append((GF_API_WEIGHT_TO_CSS_WEIGHT[var])) + return styles def get_family_subsets(family_subsets, gf_family): - """Get all the valid subsets from the given family""" - valid_subsets = [] - if family_subsets: - for subset in family_subsets: - if subset in gf_family['subsets']: - valid_subsets.append(subset) - return valid_subsets + """Get all the valid subsets from the given family""" + valid_subsets = [] + if family_subsets: + for subset in family_subsets: + if subset in gf_family["subsets"]: + valid_subsets.append(subset) + return valid_subsets def gen_head_webfonts(family, styles, subsets=None): - """Gen the html snippet to load fonts""" - server = '"https://fonts.googleapis.com/css?family=' - if subsets: - return '' % ( - server, family.replace(' ', '+'), ','.join(styles), ','.join(subsets) + """Gen the html snippet to load fonts""" + server = '"https://fonts.googleapis.com/css?family=' + if subsets: + return '' % ( + server, + family.replace(" ", "+"), + ",".join(styles), + ",".join(subsets), + ) + return '' % ( + server, + family.replace(" ", "+"), + ",".join(styles), ) - return '' % ( - server, family.replace(' ', '+'), ','.join(styles) - ) def gen_css_styles(family, styles): - css = [] - for style in styles: - if style.endswith('i'): - css.append((".%s{font-family: '%s'; " - "font-weight:%s; " - "font-style: italic;}" % ( - API_TO_CSS_STYLE_NAME[style], - family, - style[:-1]) - )) - else: - css.append((".%s{font-family: '%s'; " - "font-weight:%s;}" % ( - API_TO_CSS_STYLE_NAME[style], - family, - style) - )) - return css + css = [] + for style in styles: + if style.endswith("i"): + css.append( + ( + ".%s{font-family: '%s'; " + "font-weight:%s; " + "font-style: italic;}" + % (API_TO_CSS_STYLE_NAME[style], family, style[:-1]) + ) + ) + else: + css.append( + ( + ".%s{font-family: '%s'; " + "font-weight:%s;}" % (API_TO_CSS_STYLE_NAME[style], family, style) + ) + ) + return css def gen_body_text(styles, sample_text): - html = [] - for style in styles: - html.append("
<p class='%s'>%s</p>" % (
-            API_TO_CSS_STYLE_NAME[style],
-            sample_text)
-        )
-    return html
+    html = []
+    for style in styles:
+        html.append(
+            "<p class='%s'>%s</p>
" % (API_TO_CSS_STYLE_NAME[style], sample_text) + ) + return html def main(args=None): - parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) - parser.add_argument('key', - help='Key from Google Fonts Developer API') - parser.add_argument('family', - help='family name on fonts.google.com') - parser.add_argument('sample_text', - help='sample text used for each font') - parser.add_argument('--subsets', nargs='+', - help='family subset(s) seperated by a space') - args = parser.parse_args(args) - - gf_family = get_gf_family(args.family, args.key) - family_styles = get_family_styles(gf_family) - family_subsets = get_family_subsets(args.subsets, gf_family) - - if family_subsets: - head_fonts = gen_head_webfonts(args.family, family_styles, family_subsets) - else: - head_fonts = gen_head_webfonts(args.family, family_styles) - - css_styles = gen_css_styles(args.family, family_styles) - body_text = gen_body_text(family_styles, args.sample_text) - - html = """ + parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) + parser.add_argument("key", help="Key from Google Fonts Developer API") + parser.add_argument("family", help="family name on fonts.google.com") + parser.add_argument("sample_text", help="sample text used for each font") + parser.add_argument( + "--subsets", nargs="+", help="family subset(s) seperated by a space" + ) + args = parser.parse_args(args) + + gf_family = get_gf_family(args.family, args.key) + family_styles = get_family_styles(gf_family) + family_subsets = get_family_subsets(args.subsets, gf_family) + + if family_subsets: + head_fonts = gen_head_webfonts(args.family, family_styles, family_subsets) + else: + head_fonts = gen_head_webfonts(args.family, family_styles) + + css_styles = gen_css_styles(args.family, family_styles) + body_text = gen_body_text(family_styles, args.sample_text) + + html = """ %s @@ -215,12 +222,12 @@ def main(args=None): %s """ % ( - head_fonts, - '\n '.join(css_styles), - '\n '.join(body_text) - ) - print(html) + head_fonts, + "\n ".join(css_styles), + "\n ".join(body_text), + ) + print(html) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/find_features.py b/Lib/gftools/scripts/find_features.py index 37597c0b..e6f30543 100755 --- a/Lib/gftools/scripts/find_features.py +++ b/Lib/gftools/scripts/find_features.py @@ -25,46 +25,51 @@ def ListFeatures(font): - """List features for specified font. Table assumed structured like GPS/GSUB. + """List features for specified font. Table assumed structured like GPS/GSUB. - Args: - font: a TTFont. - Returns: - List of 3-tuples of ('GPOS', tag, name) of the features in the font. - """ - results = [] - for tbl in ["GPOS", "GSUB"]: - if tbl in font.keys(): - results += [ - (tbl, - f.FeatureTag, - "lookups: [{}]".format(", ".join(map(str, f.Feature.LookupListIndex))) - ) for f in font[tbl].table.FeatureList.FeatureRecord - ] - return results + Args: + font: a TTFont. + Returns: + List of 3-tuples of ('GPOS', tag, name) of the features in the font. 
+ """ + results = [] + for tbl in ["GPOS", "GSUB"]: + if tbl in font.keys(): + results += [ + ( + tbl, + f.FeatureTag, + "lookups: [{}]".format( + ", ".join(map(str, f.Feature.LookupListIndex)) + ), + ) + for f in font[tbl].table.FeatureList.FeatureRecord + ] + return results def main(args=None): - parser = ArgumentParser(description=__doc__) - parser.add_argument('path', metavar="PATH", - help='Path to a font file or directory') + parser = ArgumentParser(description=__doc__) + parser.add_argument("path", metavar="PATH", help="Path to a font file or directory") - args = parser.parse_args(args) - if args.path.endswith(".ttf"): - font_files = [args.path] - elif os.path.isdir(args.path): - font_files = glob(args.path + "/*.ttf") + args = parser.parse_args(args) + if args.path.endswith(".ttf"): + font_files = [args.path] + elif os.path.isdir(args.path): + font_files = glob(args.path + "/*.ttf") - for font_file in font_files: - features = [] - with TTFont(font_file) as font: - features += ListFeatures(font) + for font_file in font_files: + features = [] + with TTFont(font_file) as font: + features += ListFeatures(font) - for (table, tag, lookup_name) in features: - print('{:32s} {:4s} {:8s} {:15s}'.format( - os.path.basename(font_file), table, str(tag), lookup_name)) + for table, tag, lookup_name in features: + print( + "{:32s} {:4s} {:8s} {:15s}".format( + os.path.basename(font_file), table, str(tag), lookup_name + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() - diff --git a/Lib/gftools/scripts/fix_ascii_fontmetadata.py b/Lib/gftools/scripts/fix_ascii_fontmetadata.py index 46911887..c99fd888 100755 --- a/Lib/gftools/scripts/fix_ascii_fontmetadata.py +++ b/Lib/gftools/scripts/fix_ascii_fontmetadata.py @@ -21,15 +21,16 @@ from gftools.fix import fix_ascii_fontmetadata -description = 'Fixes TTF NAME table strings to be ascii only' +description = "Fixes TTF NAME table strings to be ascii only" parser = argparse.ArgumentParser(description=description) -parser.add_argument('ttf_font', nargs='+', - help="Font in OpenType (TTF/OTF) format") +parser.add_argument("ttf_font", nargs="+", help="Font in OpenType (TTF/OTF) format") + def main(args=None): - args = parser.parse_args(args) - for path in args.ttf_font: - fix_ascii_fontmetadata(ttLib.TTFont(path)) + args = parser.parse_args(args) + for path in args.ttf_font: + fix_ascii_fontmetadata(ttLib.TTFont(path)) + if __name__ == "__main__": - main() + main() diff --git a/Lib/gftools/scripts/fix_cmap.py b/Lib/gftools/scripts/fix_cmap.py index 242c5ea3..30c21363 100755 --- a/Lib/gftools/scripts/fix_cmap.py +++ b/Lib/gftools/scripts/fix_cmap.py @@ -15,49 +15,73 @@ # limitations under the License. # from argparse import ArgumentParser -from gftools.fix import convert_cmap_subtables_to_v4, drop_nonpid0_cmap, drop_mac_cmap, FontFixer +from gftools.fix import ( + convert_cmap_subtables_to_v4, + drop_nonpid0_cmap, + drop_mac_cmap, + FontFixer, +) description = "Manipulate a collection of fonts' cmap tables." def convert_cmap_subtables_to_v4_with_report(font): - converted = convert_cmap_subtables_to_v4(font) - for c in converted: - print(('Converted format {} cmap subtable' - ' with Platform ID = {} and Encoding ID = {}' - ' to format 4.').format(c)) - return converted + converted = convert_cmap_subtables_to_v4(font) + for c in converted: + print( + ( + "Converted format {} cmap subtable" + " with Platform ID = {} and Encoding ID = {}" + " to format 4." 
+ ).format(c) + ) + return converted + def main(args=None): - parser = ArgumentParser(description=description) - parser.add_argument('fonts', nargs='+') - parser.add_argument('--format-4-subtables', '-f4', default=False, - action='store_true', - help="Convert cmap subtables to format 4") - parser.add_argument('--drop-mac-subtable', '-dm', default=False, - action='store_true', - help='Drop Mac cmap subtables') - parser.add_argument('--keep-only-pid-0', '-k0', default=False, - action='store_true', - help=('Keep only cmap subtables with pid=0' - ' and drop the rest.')) - args = parser.parse_args(args) - - for path in args.fonts: - fixer = FontFixer(path, verbose=True) - if args.format_4_subtables: - print('\nConverting Cmap subtables to format 4...') - fixer.fixes.append(convert_cmap_subtables_to_v4_with_report) - - if args.keep_only_pid_0: - print('\nDropping all Cmap subtables,' - ' except the ones with PlatformId = 0...') - fixer.fixes.append(drop_nonpid0_cmap) - elif args.drop_mac_subtable: - print('\nDropping any Cmap Mac subtable...') - fixer.fixes.append(drop_mac_cmap) - - fixer.fix() - -if __name__ == '__main__': - main() + parser = ArgumentParser(description=description) + parser.add_argument("fonts", nargs="+") + parser.add_argument( + "--format-4-subtables", + "-f4", + default=False, + action="store_true", + help="Convert cmap subtables to format 4", + ) + parser.add_argument( + "--drop-mac-subtable", + "-dm", + default=False, + action="store_true", + help="Drop Mac cmap subtables", + ) + parser.add_argument( + "--keep-only-pid-0", + "-k0", + default=False, + action="store_true", + help=("Keep only cmap subtables with pid=0" " and drop the rest."), + ) + args = parser.parse_args(args) + + for path in args.fonts: + fixer = FontFixer(path, verbose=True) + if args.format_4_subtables: + print("\nConverting Cmap subtables to format 4...") + fixer.fixes.append(convert_cmap_subtables_to_v4_with_report) + + if args.keep_only_pid_0: + print( + "\nDropping all Cmap subtables," + " except the ones with PlatformId = 0..." + ) + fixer.fixes.append(drop_nonpid0_cmap) + elif args.drop_mac_subtable: + print("\nDropping any Cmap Mac subtable...") + fixer.fixes.append(drop_mac_cmap) + + fixer.fix() + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_family.py b/Lib/gftools/scripts/fix_family.py index ff3c80f5..93edbcc7 100755 --- a/Lib/gftools/scripts/fix_family.py +++ b/Lib/gftools/scripts/fix_family.py @@ -40,16 +40,13 @@ def main(args=None): action="store_true", help="Fix font issues that should be fixed in the source files.", ) - parser.add_argument( - "--rename-family", - help="Change the family's name" - ) + parser.add_argument("--rename-family", help="Change the family's name") parser.add_argument( "--fvar-instance-axis-dflts", help=( "Set the fvar instance default values for non-wght axes. 
e.g " "wdth=100 opsz=36" - ) + ), ) args = parser.parse_args(args) diff --git a/Lib/gftools/scripts/fix_familymetadata.py b/Lib/gftools/scripts/fix_familymetadata.py index 6aa42a98..93a22519 100755 --- a/Lib/gftools/scripts/fix_familymetadata.py +++ b/Lib/gftools/scripts/fix_familymetadata.py @@ -18,24 +18,27 @@ import os import tabulate from fontTools import ttLib -from gftools.constants import (PLATFORM_ID__WINDOWS, - NAMEID_STR, - NAMEID_FONT_FAMILY_NAME, - NAMEID_FONT_SUBFAMILY_NAME, - NAMEID_FULL_FONT_NAME, - NAMEID_POSTSCRIPT_NAME, - NAMEID_TYPOGRAPHIC_FAMILY_NAME, - NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME, - NAMEID_COMPATIBLE_FULL_MACONLY) - -parser = argparse.ArgumentParser(description=("Print out family" - " metadata of the fonts")) -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true') +from gftools.constants import ( + PLATFORM_ID__WINDOWS, + NAMEID_STR, + NAMEID_FONT_FAMILY_NAME, + NAMEID_FONT_SUBFAMILY_NAME, + NAMEID_FULL_FONT_NAME, + NAMEID_POSTSCRIPT_NAME, + NAMEID_TYPOGRAPHIC_FAMILY_NAME, + NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME, + NAMEID_COMPATIBLE_FULL_MACONLY, +) + +parser = argparse.ArgumentParser( + description=("Print out family" " metadata of the fonts") +) +parser.add_argument("font", nargs="+") +parser.add_argument("--csv", default=False, action="store_true") class FamilyMetadataTable(object): - headers = ['filename'] + headers = ["filename"] rows = [] current_row = [] @@ -52,53 +55,56 @@ def putrowToTable(self): self.rows.append(self.current_row) def binary_string(self, value): - return "{:#010b} {:#010b}".format(value >> 8, - value & 0xFF).replace('0b', '') + return "{:#010b} {:#010b}".format(value >> 8, value & 0xFF).replace("0b", "") + def putfsSelection(self, ttfont): - self.addToHeader('fsSelection') - self.current_row.append(self.binary_string(ttfont['OS/2'].fsSelection)) + self.addToHeader("fsSelection") + self.current_row.append(self.binary_string(ttfont["OS/2"].fsSelection)) def putmacStyle(self, ttfont): - self.addToHeader('macStyle') - self.current_row.append(self.binary_string(ttfont['head'].macStyle)) + self.addToHeader("macStyle") + self.current_row.append(self.binary_string(ttfont["head"].macStyle)) def putnameIds(self, ttfont, platform=PLATFORM_ID__WINDOWS): - for nameid in [NAMEID_FONT_FAMILY_NAME, - NAMEID_FONT_SUBFAMILY_NAME, - NAMEID_FULL_FONT_NAME, - NAMEID_POSTSCRIPT_NAME, - NAMEID_TYPOGRAPHIC_FAMILY_NAME, - NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME, - NAMEID_COMPATIBLE_FULL_MACONLY]: - value = '' - for name in ttfont['name'].names: + for nameid in [ + NAMEID_FONT_FAMILY_NAME, + NAMEID_FONT_SUBFAMILY_NAME, + NAMEID_FULL_FONT_NAME, + NAMEID_POSTSCRIPT_NAME, + NAMEID_TYPOGRAPHIC_FAMILY_NAME, + NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME, + NAMEID_COMPATIBLE_FULL_MACONLY, + ]: + value = "" + for name in ttfont["name"].names: if nameid == name.nameID and platform == name.platformID: - value = name.string.decode(name.getEncoding()) or '' + value = name.string.decode(name.getEncoding()) or "" break - self.addToHeader('{}:{}'.format(nameid, NAMEID_STR[nameid])) + self.addToHeader("{}:{}".format(nameid, NAMEID_STR[nameid])) self.current_row.append(value) def putitalicAngle(self, ttfont): - self.addToHeader('italicAngle') - self.current_row.append(ttfont['post'].italicAngle) + self.addToHeader("italicAngle") + self.current_row.append(ttfont["post"].italicAngle) def putwidthClass(self, ttfont): - self.addToHeader('usWidthClass') - self.current_row.append(ttfont['OS/2'].usWidthClass) + self.addToHeader("usWidthClass") + 
self.current_row.append(ttfont["OS/2"].usWidthClass) def putweightClass(self, ttfont): - self.addToHeader('usWeightClass') - self.current_row.append(ttfont['OS/2'].usWeightClass) + self.addToHeader("usWeightClass") + self.current_row.append(ttfont["OS/2"].usWeightClass) def putPanose(self, ttfont): - for i, k in enumerate(sorted(ttfont['OS/2'].panose.__dict__.keys())): + for i, k in enumerate(sorted(ttfont["OS/2"].panose.__dict__.keys())): self.addToHeader(k) - self.current_row.append(getattr(ttfont['OS/2'].panose, k, 0)) + self.current_row.append(getattr(ttfont["OS/2"].panose, k, 0)) def putfixedPitch(self, ttfont): - self.addToHeader('isFixedPitch') - self.current_row.append(ttfont['post'].isFixedPitch) + self.addToHeader("isFixedPitch") + self.current_row.append(ttfont["post"].isFixedPitch) + def main(args=None): options = parser.parse_args(args) @@ -120,6 +126,7 @@ def main(args=None): def as_csv(rows): import csv import sys + writer = csv.writer(sys.stdout) writer.writerows([fm.headers]) writer.writerows(rows) @@ -131,5 +138,5 @@ def as_csv(rows): print(tabulate.tabulate(fm.rows, fm.headers)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/fix_font.py b/Lib/gftools/scripts/fix_font.py index c5a19eb0..4a178210 100755 --- a/Lib/gftools/scripts/fix_font.py +++ b/Lib/gftools/scripts/fix_font.py @@ -31,16 +31,13 @@ def main(args=None): action="store_true", help="Fix font issues that should be fixed in the source files.", ) - parser.add_argument( - "--rename-family", - help="Change the family's name" - ) + parser.add_argument("--rename-family", help="Change the family's name") parser.add_argument( "--fvar-instance-axis-dflts", help=( "Set the fvar instance default values for non-wght axes. e.g " "wdth=100 opsz=36" - ) + ), ) args = parser.parse_args(args) diff --git a/Lib/gftools/scripts/fix_fstype.py b/Lib/gftools/scripts/fix_fstype.py index 00242cf5..9b05a4b5 100755 --- a/Lib/gftools/scripts/fix_fstype.py +++ b/Lib/gftools/scripts/fix_fstype.py @@ -24,21 +24,18 @@ https://www.microsoft.com/typography/otspec/os2.htm#fst """ from __future__ import print_function -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter from gftools.fix import fix_fs_type, FontFixer -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fonts', - nargs="+", - help="Fonts in OpenType (TTF/OTF) format") + +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fonts", nargs="+", help="Fonts in OpenType (TTF/OTF) format") def main(args=None): - args = parser.parse_args(args) - for font_path in args.fonts: - FontFixer(font_path, fixes=[fix_fs_type], verbose=True).fix() + args = parser.parse_args(args) + for font_path in args.fonts: + FontFixer(font_path, fixes=[fix_fs_type], verbose=True).fix() -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_gasp.py b/Lib/gftools/scripts/fix_gasp.py index 7a5938a7..f3404c78 100755 --- a/Lib/gftools/scripts/fix_gasp.py +++ b/Lib/gftools/scripts/fix_gasp.py @@ -19,14 +19,16 @@ import argparse from gftools.fix import GaspFixer -description = 'Fixes TTF GASP table' +description = "Fixes TTF GASP table" parser = argparse.ArgumentParser(description=description) -parser.add_argument('ttf_font', nargs='+', - help="Font in OpenType (TTF/OTF) format") -parser.add_argument('--autofix', action='store_true', 
help='Apply autofix') -parser.add_argument('--set', type=int, - help=('Change gasprange value of key 65535' - ' to new value'), default=None) +parser.add_argument("ttf_font", nargs="+", help="Font in OpenType (TTF/OTF) format") +parser.add_argument("--autofix", action="store_true", help="Apply autofix") +parser.add_argument( + "--set", + type=int, + help=("Change gasprange value of key 65535" " to new value"), + default=None, +) def main(args=None): @@ -39,5 +41,6 @@ def main(args=None): else: GaspFixer(path).show() -if __name__ == '__main__': - main() + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_glyph_private_encoding.py b/Lib/gftools/scripts/fix_glyph_private_encoding.py index ff7c2709..6bcd5bed 100755 --- a/Lib/gftools/scripts/fix_glyph_private_encoding.py +++ b/Lib/gftools/scripts/fix_glyph_private_encoding.py @@ -23,29 +23,33 @@ from gftools.utils import get_unencoded_glyphs -description = 'Fixes TTF unencoded glyphs to have Private Use Area encodings' +description = "Fixes TTF unencoded glyphs to have Private Use Area encodings" parser = argparse.ArgumentParser(description=description) -parser.add_argument('ttf_font', nargs='+', - help='Font in OpenType (TTF/OTF) format') -parser.add_argument('--autofix', action="store_true", - help='Apply autofix. ' - 'Otherwise just check if there are unencoded glyphs') +parser.add_argument("ttf_font", nargs="+", help="Font in OpenType (TTF/OTF) format") +parser.add_argument( + "--autofix", + action="store_true", + help="Apply autofix. " "Otherwise just check if there are unencoded glyphs", +) def main(args=None): - args = parser.parse_args(args) - for path in args.ttf_font: - if not os.path.exists(path): - continue - - if args.autofix: - FontFixer(path, fixes=[fix_pua], verbose=True).fix() - else: - font = ttLib.TTFont(path, 0) - print(("\nThese are the unencoded glyphs in font file '{0}':\n" - "{1}").format(path, '\n'.join(get_unencoded_glyphs(font)))) - -if __name__ == '__main__': - main() - + args = parser.parse_args(args) + for path in args.ttf_font: + if not os.path.exists(path): + continue + + if args.autofix: + FontFixer(path, fixes=[fix_pua], verbose=True).fix() + else: + font = ttLib.TTFont(path, 0) + print( + ("\nThese are the unencoded glyphs in font file '{0}':\n" "{1}").format( + path, "\n".join(get_unencoded_glyphs(font)) + ) + ) + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_glyphs.py b/Lib/gftools/scripts/fix_glyphs.py index 7674dac1..ac361d7e 100755 --- a/Lib/gftools/scripts/fix_glyphs.py +++ b/Lib/gftools/scripts/fix_glyphs.py @@ -15,9 +15,11 @@ # limitations under the License. 
# def main(args=None): - print("This script is deprecated (it never really did anything anyway)") - print(r"You may be looking for https://github.com/googlefonts/gf-glyphs-scripts/blob/main/Google%20Fonts/fixfonts.py instead") + print("This script is deprecated (it never really did anything anyway)") + print( + r"You may be looking for https://github.com/googlefonts/gf-glyphs-scripts/blob/main/Google%20Fonts/fixfonts.py instead" + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_hinting.py b/Lib/gftools/scripts/fix_hinting.py index da31a5ab..f6b174d6 100755 --- a/Lib/gftools/scripts/fix_hinting.py +++ b/Lib/gftools/scripts/fix_hinting.py @@ -30,6 +30,7 @@ import argparse from gftools.fix import fix_hinted_font, FontFixer + def main(args=None): parser = argparse.ArgumentParser() parser.add_argument("font") @@ -38,6 +39,5 @@ def main(args=None): FontFixer(args.font, fixes=[fix_hinted_font], verbose=True).fix() -if __name__ == '__main__': +if __name__ == "__main__": main() - diff --git a/Lib/gftools/scripts/fix_isfixedpitch.py b/Lib/gftools/scripts/fix_isfixedpitch.py index 1c8c63ab..c2107cfd 100755 --- a/Lib/gftools/scripts/fix_isfixedpitch.py +++ b/Lib/gftools/scripts/fix_isfixedpitch.py @@ -37,6 +37,7 @@ from gftools.fix import fix_isFixedPitch, FontFixer import argparse + def main(args=None): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--fonts", nargs="+", required=True) @@ -45,6 +46,6 @@ def main(args=None): for font in args.fonts: FontFixer(font, fixes=[fix_isFixedPitch], verbose=True).fix() + if __name__ == "__main__": main() - diff --git a/Lib/gftools/scripts/fix_nameids.py b/Lib/gftools/scripts/fix_nameids.py index e0851b08..56f0650a 100755 --- a/Lib/gftools/scripts/fix_nameids.py +++ b/Lib/gftools/scripts/fix_nameids.py @@ -22,59 +22,71 @@ from gftools.fix import drop_mac_names, drop_superfluous_mac_names, FontFixer -parser = argparse.ArgumentParser(description='Print out nameID' - ' strings of the fonts') -parser.add_argument('font', nargs="+") -parser.add_argument('--autofix', default=False, - action='store_true', help='Apply autofix') -parser.add_argument('--csv', default=False, action='store_true', - help="Output data in comma-separate-values" - " (CSV) file format") -parser.add_argument('--id', '-i', default='all') -parser.add_argument('--platform', '-p', type=int, default=3) -parser.add_argument('--drop-superfluous-mac-names', '-ms', default=False, - action='store_true', - help='Drop superfluous Mac names') -parser.add_argument('--drop-mac-names', '-m', default=False, - action='store_true', - help='Drop all Mac name fields') +parser = argparse.ArgumentParser(description="Print out nameID" " strings of the fonts") +parser.add_argument("font", nargs="+") +parser.add_argument( + "--autofix", default=False, action="store_true", help="Apply autofix" +) +parser.add_argument( + "--csv", + default=False, + action="store_true", + help="Output data in comma-separate-values" " (CSV) file format", +) +parser.add_argument("--id", "-i", default="all") +parser.add_argument("--platform", "-p", type=int, default=3) +parser.add_argument( + "--drop-superfluous-mac-names", + "-ms", + default=False, + action="store_true", + help="Drop superfluous Mac names", +) +parser.add_argument( + "--drop-mac-names", + "-m", + default=False, + action="store_true", + help="Drop all Mac name fields", +) def delete_non_platform1_names(font): changed = False - for name in font['name'].names: + for name in 
font["name"].names: if name.platformID != 1: del name changed = True return changed + def main(args=None): args = parser.parse_args(args) - nameids = ['1', '2', '4', '6', '16', '17', '18'] - user_nameids = [x.strip() for x in args.id.split(',')] + nameids = ["1", "2", "4", "6", "16", "17", "18"] + user_nameids = [x.strip() for x in args.id.split(",")] - if 'all' not in user_nameids: + if "all" not in user_nameids: nameids = set(nameids) & set(user_nameids) rows = [] for font in args.font: ttfont = ttLib.TTFont(font) row = [os.path.basename(font)] - for name in ttfont['name'].names: - if str(name.nameID) not in nameids or\ - name.platformID != args.platform: + for name in ttfont["name"].names: + if str(name.nameID) not in nameids or name.platformID != args.platform: continue - value = name.string.decode(name.getEncoding()) or '' + value = name.string.decode(name.getEncoding()) or "" row.append(value) rows.append(row) - header = ['filename'] + ['id' + x for x in nameids] + header = ["filename"] + ["id" + x for x in nameids] def as_csv(rows): import csv import sys + writer = csv.writer(sys.stdout) writer.writerows([header]) writer.writerows(rows) @@ -93,16 +105,16 @@ def as_csv(rows): if has_mac_names(ttLib.TTFont(path)): fixer.fixes.append(drop_superfluous_mac_names) else: - print('font %s has no mac nametable' % path) + print("font %s has no mac nametable" % path) if args.drop_mac_names: if has_mac_names(ttLib.TTFont(path)): fixer.fixes.append(drop_mac_names) else: - print('font %s has no mac nametable' % path) + print("font %s has no mac nametable" % path) fixer.fix() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/fix_nonhinting.py b/Lib/gftools/scripts/fix_nonhinting.py index 50e7c01a..05496011 100755 --- a/Lib/gftools/scripts/fix_nonhinting.py +++ b/Lib/gftools/scripts/fix_nonhinting.py @@ -46,63 +46,59 @@ contains the minimal recommended instructions. 
""" from __future__ import print_function -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter import os from fontTools import ttLib from fontTools.ttLib.tables import ttProgram from gftools.fix import fix_unhinted_font -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fontfile_in', - nargs=1, - help="Font in OpenType (TTF/OTF) format") -parser.add_argument('fontfile_out', - nargs=1, - help="Filename for the output") +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fontfile_in", nargs=1, help="Font in OpenType (TTF/OTF) format") +parser.add_argument("fontfile_out", nargs=1, help="Filename for the output") + def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - # Open the font file supplied as the first argument on the command line - fontfile_in = os.path.abspath(args.fontfile_in[0]) - font = ttLib.TTFont(fontfile_in) + # Open the font file supplied as the first argument on the command line + fontfile_in = os.path.abspath(args.fontfile_in[0]) + font = ttLib.TTFont(fontfile_in) - # Save a backup - backupfont = '{}-backup-fonttools-prep-gasp{}'.format(fontfile_in[0:-4], - fontfile_in[-4:]) - # print "Saving to ", backupfont - font.save(backupfont) - print(backupfont, " saved.") + # Save a backup + backupfont = "{}-backup-fonttools-prep-gasp{}".format( + fontfile_in[0:-4], fontfile_in[-4:] + ) + # print "Saving to ", backupfont + font.save(backupfont) + print(backupfont, " saved.") - # Print the Gasp table - if "gasp" in font: - print("GASP was: ", font["gasp"].gaspRange) - else: - print("GASP wasn't there") + # Print the Gasp table + if "gasp" in font: + print("GASP was: ", font["gasp"].gaspRange) + else: + print("GASP wasn't there") - # Print the PREP table - if "prep" in font: - old_program = ttProgram.Program.getAssembly(font["prep"].program) - print("PREP was:\n\t" + "\n\t".join(old_program)) - else: - print("PREP wasn't there") + # Print the PREP table + if "prep" in font: + old_program = ttProgram.Program.getAssembly(font["prep"].program) + print("PREP was:\n\t" + "\n\t".join(old_program)) + else: + print("PREP wasn't there") - fix_unhinted_font(font) - # Print the Gasp table - print("GASP now: ", font["gasp"].gaspRange) + fix_unhinted_font(font) + # Print the Gasp table + print("GASP now: ", font["gasp"].gaspRange) - # Print the PREP table - current_program = ttProgram.Program.getAssembly(font["prep"].program) - print("PREP now:\n\t" + "\n\t".join(current_program)) + # Print the PREP table + current_program = ttProgram.Program.getAssembly(font["prep"].program) + print("PREP now:\n\t" + "\n\t".join(current_program)) - # Save the new file with the name of the input file - fontfile_out = os.path.abspath(args.fontfile_out[0]) - font.save(fontfile_out) - print(fontfile_out, " saved.") + # Save the new file with the name of the input file + fontfile_out = os.path.abspath(args.fontfile_out[0]) + font.save(fontfile_out) + print(fontfile_out, " saved.") -if __name__ == "__main__": - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_ttfautohint.py b/Lib/gftools/scripts/fix_ttfautohint.py index 9ed2c795..16bbeda3 100755 --- a/Lib/gftools/scripts/fix_ttfautohint.py +++ b/Lib/gftools/scripts/fix_ttfautohint.py @@ -19,10 +19,9 @@ import argparse from fontTools import ttLib -description = 'Fixes TTF Autohint table' +description = "Fixes TTF Autohint 
table" parser = argparse.ArgumentParser(description=description) -parser.add_argument('ttf_font', nargs='+', - help="Font in OpenType (TTF/OTF) format") +parser.add_argument("ttf_font", nargs="+", help="Font in OpenType (TTF/OTF) format") # TODO: # parser.add_argument('--autofix', action='store_true', help='Apply autofix') @@ -31,16 +30,16 @@ def main(args=None): args = parser.parse_args(args) for path in args.ttf_font: font = ttLib.TTFont(path) - if 'TTFA' in font.keys(): - content = font['TTFA'].__dict__['data'].strip() + if "TTFA" in font.keys(): + content = font["TTFA"].__dict__["data"].strip() ttfa_data = {} - for line in content.split('\n'): - key, value = line.strip().split('=') + for line in content.split("\n"): + key, value = line.strip().split("=") ttfa_data[key.strip()] = value.strip() print("TTFA table values for '{}':\n{}".format(path, ttfa_data)) else: print("'{}' lacks a TTFA table.".format(path)) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_vendorid.py b/Lib/gftools/scripts/fix_vendorid.py index 38cdfe5e..6cfb7344 100755 --- a/Lib/gftools/scripts/fix_vendorid.py +++ b/Lib/gftools/scripts/fix_vendorid.py @@ -22,54 +22,56 @@ # set up some command line argument processing -parser = argparse.ArgumentParser(description="Print vendorID" - " of TTF files") -parser.add_argument('arg_filepaths', nargs='+', - help='font file path(s) to check.' - ' Wildcards like *.ttf are allowed.') +parser = argparse.ArgumentParser(description="Print vendorID" " of TTF files") +parser.add_argument( + "arg_filepaths", + nargs="+", + help="font file path(s) to check." " Wildcards like *.ttf are allowed.", +) def main(args=None): - # set up a basic logging config - # to include timestamps - # log_format = '%(asctime)s %(levelname)-8s %(message)s' - global font - log_format = '%(levelname)-8s %(message)s ' - logger = logging.getLogger() - handler = logging.StreamHandler() - formatter = logging.Formatter(log_format) - handler.setFormatter(formatter) - logger.addHandler(handler) - args = parser.parse_args(args) + # set up a basic logging config + # to include timestamps + # log_format = '%(asctime)s %(levelname)-8s %(message)s' + global font + log_format = "%(levelname)-8s %(message)s " + logger = logging.getLogger() + handler = logging.StreamHandler() + formatter = logging.Formatter(log_format) + handler.setFormatter(formatter) + logger.addHandler(handler) + args = parser.parse_args(args) - # ------------------------------------------------------ - logging.debug("Checking each file is a ttf") - fonts_to_check = [] - for arg_filepath in sorted(args.arg_filepaths): - # use glob.glob to accept *.ttf - for fullpath in glob.glob(arg_filepath): - file_path, file_name = os.path.split(fullpath) - if file_name.endswith(".ttf"): - fonts_to_check.append(fullpath) - else: - logging.warning("Skipping '{}' as it does not seem " - "to be valid TrueType font file.".format(file_name)) - fonts_to_check.sort() + # ------------------------------------------------------ + logging.debug("Checking each file is a ttf") + fonts_to_check = [] + for arg_filepath in sorted(args.arg_filepaths): + # use glob.glob to accept *.ttf + for fullpath in glob.glob(arg_filepath): + file_path, file_name = os.path.split(fullpath) + if file_name.endswith(".ttf"): + fonts_to_check.append(fullpath) + else: + logging.warning( + "Skipping '{}' as it does not seem " + "to be valid TrueType font file.".format(file_name) + ) + fonts_to_check.sort() - if fonts_to_check == []: - 
logging.error("None of the fonts are valid TrueType files!") + if fonts_to_check == []: + logging.error("None of the fonts are valid TrueType files!") - # ------------------------------------------------------ - for font_file in fonts_to_check: - font = ttLib.TTFont(font_file) - logging.info("OK: {} opened with fontTools".format(font_file)) + # ------------------------------------------------------ + for font_file in fonts_to_check: + font = ttLib.TTFont(font_file) + logging.info("OK: {} opened with fontTools".format(font_file)) - # ---------------------------------------------------- - vid = font['OS/2'].achVendID - print("[{}]: {}".format(vid, font_file)) + # ---------------------------------------------------- + vid = font["OS/2"].achVendID + print("[{}]: {}".format(vid, font_file)) __author__ = "The Google Fonts Tools Authors" -if __name__ == '__main__': - main() - +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_vertical_metrics.py b/Lib/gftools/scripts/fix_vertical_metrics.py index 8d8327a6..8bf4b282 100755 --- a/Lib/gftools/scripts/fix_vertical_metrics.py +++ b/Lib/gftools/scripts/fix_vertical_metrics.py @@ -40,30 +40,31 @@ class TextMetricsView(object): - def __init__(self): self.outstream = StringIO() - self._its_metrics_header = ['Parameter '] + self._its_metrics_header = ["Parameter "] # first column has a length of largest parameter # named OS/2.sTypoDescender - self._its_metrics = collections.OrderedDict([ - ('ymax', []), - ('hhea.ascent', []), - ('OS/2.sTypoAscender', []), - ('OS/2.usWinAscent', []), - ('ymin', []), - ('hhea.descent', []), - ('OS/2.sTypoDescender', []), - ('OS/2.usWinDescent', []), - ('hhea.lineGap', []), - ('OS/2.sTypoLineGap', []), - ('hhea total', []), - ('typo total', []), - ('win total', []), - ('UPM:Heights', []), - ('UPM:Heights %', []) - ]) + self._its_metrics = collections.OrderedDict( + [ + ("ymax", []), + ("hhea.ascent", []), + ("OS/2.sTypoAscender", []), + ("OS/2.usWinAscent", []), + ("ymin", []), + ("hhea.descent", []), + ("OS/2.sTypoDescender", []), + ("OS/2.usWinDescent", []), + ("hhea.lineGap", []), + ("OS/2.sTypoLineGap", []), + ("hhea total", []), + ("typo total", []), + ("win total", []), + ("UPM:Heights", []), + ("UPM:Heights %", []), + ] + ) self._inconsistent = set() self._inconsistent_table = {} self._warnings = [] @@ -84,49 +85,51 @@ def add_to_table(self, fontname, key, value): # It looks like json groupped by metrics key inconsistentRow = {} for r in self._inconsistent_table[key]: - if r['value'] == value: + if r["value"] == value: inconsistentRow = r if not inconsistentRow: - inconsistentRow = {'value': value, 'fonts': []} + inconsistentRow = {"value": value, "fonts": []} self._inconsistent_table[key].append(inconsistentRow) - inconsistentRow['fonts'].append(fontname) + inconsistentRow["fonts"].append(fontname) self._its_metrics[key].append(value) def add_metric(self, font_name, vmet): ymin, ymax = vmet.get_bounding() self._its_metrics_header.append(font_name) - self.add_to_table(font_name, 'hhea.ascent', vmet.ascents.hhea) - self.add_to_table(font_name, 'OS/2.sTypoAscender', vmet.ascents.os2typo) - self.add_to_table(font_name, 'OS/2.usWinAscent', vmet.ascents.os2win) - self.add_to_table(font_name, 'hhea.descent', vmet.descents.hhea) - self.add_to_table(font_name, 'OS/2.sTypoDescender', vmet.descents.os2typo) - self.add_to_table(font_name, 'OS/2.usWinDescent', vmet.descents.os2win) - self.add_to_table(font_name, 'hhea.lineGap', vmet.linegaps.hhea) - self.add_to_table(font_name, 'OS/2.sTypoLineGap', 
vmet.linegaps.os2typo) - self._its_metrics['ymax'].append(ymax) - self._its_metrics['ymin'].append(ymin) + self.add_to_table(font_name, "hhea.ascent", vmet.ascents.hhea) + self.add_to_table(font_name, "OS/2.sTypoAscender", vmet.ascents.os2typo) + self.add_to_table(font_name, "OS/2.usWinAscent", vmet.ascents.os2win) + self.add_to_table(font_name, "hhea.descent", vmet.descents.hhea) + self.add_to_table(font_name, "OS/2.sTypoDescender", vmet.descents.os2typo) + self.add_to_table(font_name, "OS/2.usWinDescent", vmet.descents.os2win) + self.add_to_table(font_name, "hhea.lineGap", vmet.linegaps.hhea) + self.add_to_table(font_name, "OS/2.sTypoLineGap", vmet.linegaps.os2typo) + self._its_metrics["ymax"].append(ymax) + self._its_metrics["ymin"].append(ymin) value = abs(ymin) + ymax - upm = '%s:%s' % (vmet.get_upm_height(), value) - self._its_metrics['UPM:Heights'].append(upm) + upm = "%s:%s" % (vmet.get_upm_height(), value) + self._its_metrics["UPM:Heights"].append(upm) value = (value / float(vmet.get_upm_height())) * 100 - self._its_metrics['UPM:Heights %'].append('%d %%' % value) + self._its_metrics["UPM:Heights %"].append("%d %%" % value) hhea_total = vmet.ascents.hhea + abs(vmet.descents.hhea) + vmet.linegaps.hhea - self._its_metrics['hhea total'].append(hhea_total) + self._its_metrics["hhea total"].append(hhea_total) - typo_total = vmet.ascents.os2typo + abs(vmet.descents.os2typo) + vmet.linegaps.os2typo - self._its_metrics['typo total'].append(typo_total) + typo_total = ( + vmet.ascents.os2typo + abs(vmet.descents.os2typo) + vmet.linegaps.os2typo + ) + self._its_metrics["typo total"].append(typo_total) win_total = vmet.ascents.os2win + abs(vmet.descents.os2win) - self._its_metrics['win total'].append(win_total) + self._its_metrics["win total"].append(win_total) if len(set([typo_total, hhea_total, win_total])) > 1: - self._warnings.append('%s has NOT even heights' % font_name) + self._warnings.append("%s has NOT even heights" % font_name) self.glyphs[font_name] = vmet.get_highest_and_lowest() @@ -139,19 +142,22 @@ def print_metrics(self): def print_warnings(self): if self._inconsistent: - _ = 'WARNING: Inconsistent {}' - print(_.format(' '.join([str(x) for x in self._inconsistent])), - end='\n\n', file=self.outstream) + _ = "WARNING: Inconsistent {}" + print( + _.format(" ".join([str(x) for x in self._inconsistent])), + end="\n\n", + file=self.outstream, + ) if self._warnings: for warn in self._warnings: - print('WARNING: %s' % warn, file=self.outstream) + print("WARNING: %s" % warn, file=self.outstream) def print_metrics_table(self): - formatstring = '' + formatstring = "" for k in self._its_metrics_header: - print(('{:<%s}' % (len(k) + 4)).format(k), end='', file=self.outstream) - formatstring += '{:<%s}' % (len(k) + 4) + print(("{:<%s}" % (len(k) + 4)).format(k), end="", file=self.outstream) + formatstring += "{:<%s}" % (len(k) + 4) print(file=self.outstream) for k, values in self._its_metrics.items(): @@ -163,9 +169,9 @@ def print_high_glyphs(self): if glyphs[0]: if not header_printed: print(file=self.outstream) - print('High Glyphs', file=self.outstream) + print("High Glyphs", file=self.outstream) header_printed = True - print(font + ':', ' '.join(glyphs[0]), file=self.outstream) + print(font + ":", " ".join(glyphs[0]), file=self.outstream) def print_low_glyphs(self): header_printed = False @@ -173,9 +179,9 @@ def print_low_glyphs(self): if glyphs[1]: if not header_printed: print(file=self.outstream) - print('Low Glyphs', file=self.outstream) + print("Low Glyphs", file=self.outstream) 
header_printed = True - print(font + ':', ' '.join(glyphs[1]), file=self.outstream) + print(font + ":", " ".join(glyphs[1]), file=self.outstream) def print_inconsistent_table(self): print(file=self.outstream) @@ -184,25 +190,26 @@ def print_inconsistent_table(self): tbl = {} for r in row: - if r['value'] == value: + if r["value"] == value: continue if metrickey not in tbl: tbl[metrickey] = [] - tbl[metrickey] += r['fonts'] + tbl[metrickey] += r["fonts"] for k, r in tbl.items(): - print('WARNING: Inconsistent %s:' % k, ', '.join(r), - file=self.outstream) + print( + "WARNING: Inconsistent %s:" % k, ", ".join(r), file=self.outstream + ) def find_max_occurs_from_metrics_key(self, metricvalues): result = 0 occurs = 0 if len(metricvalues) == 2: - return metricvalues[1]['value'] + return metricvalues[1]["value"] for v in metricvalues: - if len(v['fonts']) > occurs: - occurs = len(v['fonts']) - result = v['value'] + if len(v["fonts"]) > occurs: + occurs = len(v["fonts"]) + result = v["value"] return result def get_contents(self): @@ -212,163 +219,208 @@ def get_contents(self): parser = argparse.ArgumentParser() # ascent parameters -parser.add_argument('-a', '--ascents', type=int, - help=("Set new ascents value.")) - -parser.add_argument('-ah', '--ascents-hhea', type=int, - help=("Set new ascents value in 'Horizontal Header'" - " table ('hhea'). This argument" - " cancels --ascents.")) -parser.add_argument('-at', '--ascents-typo', type=int, - help=("Set new ascents value in 'Horizontal Header'" - " table ('OS/2'). This argument" - " cancels --ascents.")) -parser.add_argument('-aw', '--ascents-win', type=int, - help=("Set new ascents value in 'Horizontal Header'" - " table ('OS/2.Win'). This argument" - " cancels --ascents.")) +parser.add_argument("-a", "--ascents", type=int, help=("Set new ascents value.")) + +parser.add_argument( + "-ah", + "--ascents-hhea", + type=int, + help=( + "Set new ascents value in 'Horizontal Header'" + " table ('hhea'). This argument" + " cancels --ascents." + ), +) +parser.add_argument( + "-at", + "--ascents-typo", + type=int, + help=( + "Set new ascents value in 'Horizontal Header'" + " table ('OS/2'). This argument" + " cancels --ascents." + ), +) +parser.add_argument( + "-aw", + "--ascents-win", + type=int, + help=( + "Set new ascents value in 'Horizontal Header'" + " table ('OS/2.Win'). This argument" + " cancels --ascents." + ), +) # descent parameters -parser.add_argument('-d', '--descents', type=int, - help=("Set new descents value.")) -parser.add_argument('-dh', '--descents-hhea', type=int, - help=("Set new descents value in 'Horizontal Header'" - " table ('hhea'). This argument" - " cancels --descents.")) -parser.add_argument('-dt', '--descents-typo', type=int, - help=("Set new descents value in 'Horizontal Header'" - " table ('OS/2'). This argument" - " cancels --descents.")) -parser.add_argument('-dw', '--descents-win', type=int, - help=("Set new descents value in 'Horizontal Header'" - " table ('OS/2.Win'). This argument" - " cancels --descents.")) +parser.add_argument("-d", "--descents", type=int, help=("Set new descents value.")) +parser.add_argument( + "-dh", + "--descents-hhea", + type=int, + help=( + "Set new descents value in 'Horizontal Header'" + " table ('hhea'). This argument" + " cancels --descents." + ), +) +parser.add_argument( + "-dt", + "--descents-typo", + type=int, + help=( + "Set new descents value in 'Horizontal Header'" + " table ('OS/2'). This argument" + " cancels --descents." 
+ ), +) +parser.add_argument( + "-dw", + "--descents-win", + type=int, + help=( + "Set new descents value in 'Horizontal Header'" + " table ('OS/2.Win'). This argument" + " cancels --descents." + ), +) # linegaps parameters -parser.add_argument('-l', '--linegaps', type=int, - help=("Set new linegaps value.")) -parser.add_argument('-lh', '--linegaps-hhea', type=int, - help=("Set new linegaps value in 'Horizontal Header'" - " table ('hhea')")) -parser.add_argument('-lt', '--linegaps-typo', type=int, - help=("Set new linegaps value in 'Horizontal Header'" - " table ('OS/2')")) - -parser.add_argument('--autofix', action="store_true", - help="Autofix font metrics") -parser.add_argument('ttf_font', nargs='+', metavar='ttf_font', - help="Font file in OpenType (TTF/OTF) format") +parser.add_argument("-l", "--linegaps", type=int, help=("Set new linegaps value.")) +parser.add_argument( + "-lh", + "--linegaps-hhea", + type=int, + help=("Set new linegaps value in 'Horizontal Header'" " table ('hhea')"), +) +parser.add_argument( + "-lt", + "--linegaps-typo", + type=int, + help=("Set new linegaps value in 'Horizontal Header'" " table ('OS/2')"), +) + +parser.add_argument("--autofix", action="store_true", help="Autofix font metrics") +parser.add_argument( + "ttf_font", + nargs="+", + metavar="ttf_font", + help="Font file in OpenType (TTF/OTF) format", +) def vmetrics(ttFonts): - from fontbakery.utils import get_bounding_box - v_metrics = {"ymin": 0, "ymax": 0} - for ttFont in ttFonts: - font_ymin, font_ymax = get_bounding_box(ttFont) - v_metrics["ymin"] = min(font_ymin, v_metrics["ymin"]) - v_metrics["ymax"] = max(font_ymax, v_metrics["ymax"]) - return v_metrics + from fontbakery.utils import get_bounding_box + v_metrics = {"ymin": 0, "ymax": 0} + for ttFont in ttFonts: + font_ymin, font_ymax = get_bounding_box(ttFont) + v_metrics["ymin"] = min(font_ymin, v_metrics["ymin"]) + v_metrics["ymax"] = max(font_ymax, v_metrics["ymax"]) + return v_metrics -def main(args=None): - options = parser.parse_args(args) - fonts = options.ttf_font - if options.ascents or \ - options.descents or \ - options.linegaps or \ - options.linegaps == 0 or \ - options.ascents_hhea or \ - options.ascents_typo or \ - options.ascents_win or \ - options.descents_hhea or \ - options.descents_typo or \ - options.descents_win or \ - options.linegaps_hhea or \ - options.linegaps_hhea == 0 or \ - options.linegaps_typo or \ - options.linegaps_typo == 0: - for f in fonts: - try: - ttfont = ttLib.TTFont(f) - except TTLibError as ex: - print('Error: {0}: {1}'.format(f, ex)) - continue - - if options.ascents: - ttfont['hhea'].ascent = options.ascents - ttfont['OS/2'].sTypoAscender = options.ascents - ttfont['OS/2'].usWinAscent = options.ascents - - if options.descents: - ttfont['hhea'].descent = options.descents - ttfont['OS/2'].sTypoDescender = options.descents - ttfont['OS/2'].usWinDescent = abs(options.descents) - - if options.linegaps or options.linegaps == 0: - ttfont['hhea'].lineGap = options.linegaps - ttfont['OS/2'].sTypoLineGap = options.linegaps - - if options.ascents_hhea: - ttfont['hhea'].ascent = options.ascents_hhea - if options.ascents_typo: - ttfont['OS/2'].sTypoAscender = options.ascents_typo - if options.ascents_win: - ttfont['OS/2'].usWinAscent = options.ascents_win - - if options.descents_hhea: - ttfont['hhea'].descent = options.descents_hhea - if options.descents_typo: - ttfont['OS/2'].sTypoDescender = options.descents_typo - if options.descents_win: - ttfont['OS/2'].usWinDescent = abs(options.descents_win) - - if 
options.linegaps_hhea or options.linegaps_hhea == 0: - ttfont['hhea'].lineGap = options.linegaps_hhea - if options.linegaps_typo or options.linegaps_typo == 0: - ttfont['OS/2'].sTypoLineGap = options.linegaps_typo - - ttfont.save(f[:-4] + '.fix.ttf') - - elif options.autofix: - ttFonts = [] - for f in fonts: - try: - ttFonts.append(ttLib.TTFont(f)) - except TTLibError as ex: - print('Error: {0}: {1}'.format(f, ex)) - continue - - v_metrics = vmetrics(ttFonts) - for ttfont in ttFonts: - ttfont['hhea'].ascent = v_metrics["ymax"] - ttfont['OS/2'].sTypoAscender = v_metrics["ymax"] - ttfont['OS/2'].usWinAscent = v_metrics["ymax"] - - ttfont['hhea'].descent = v_metrics["ymin"] - ttfont['OS/2'].sTypoDescender = v_metrics["ymin"] - ttfont['OS/2'].usWinDescent = abs(v_metrics["ymin"]) - - ttfont.save(ttfont.reader.file.name[:-4] + '.fix.ttf') - - else: - entries = [ - ('hhea', 'ascent'), - ('OS/2', 'sTypoAscender'), - ('OS/2', 'usWinAscent'), - ('hhea', 'descent'), - ('OS/2', 'sTypoDescender'), - ('OS/2', 'usWinDescent'), - ('hhea', 'lineGap'), - ('OS/2', 'sTypoLineGap') - ] - - for f in fonts: - ttfont = ttLib.TTFont(f) - print ("## {}".format(f)) - for table, field in entries: - print ("{} {}: {}".format(table, field, getattr(ttfont[table], field))) - print() - -if __name__ == '__main__': - main() +def main(args=None): + options = parser.parse_args(args) + fonts = options.ttf_font + if ( + options.ascents + or options.descents + or options.linegaps + or options.linegaps == 0 + or options.ascents_hhea + or options.ascents_typo + or options.ascents_win + or options.descents_hhea + or options.descents_typo + or options.descents_win + or options.linegaps_hhea + or options.linegaps_hhea == 0 + or options.linegaps_typo + or options.linegaps_typo == 0 + ): + for f in fonts: + try: + ttfont = ttLib.TTFont(f) + except TTLibError as ex: + print("Error: {0}: {1}".format(f, ex)) + continue + + if options.ascents: + ttfont["hhea"].ascent = options.ascents + ttfont["OS/2"].sTypoAscender = options.ascents + ttfont["OS/2"].usWinAscent = options.ascents + + if options.descents: + ttfont["hhea"].descent = options.descents + ttfont["OS/2"].sTypoDescender = options.descents + ttfont["OS/2"].usWinDescent = abs(options.descents) + + if options.linegaps or options.linegaps == 0: + ttfont["hhea"].lineGap = options.linegaps + ttfont["OS/2"].sTypoLineGap = options.linegaps + + if options.ascents_hhea: + ttfont["hhea"].ascent = options.ascents_hhea + if options.ascents_typo: + ttfont["OS/2"].sTypoAscender = options.ascents_typo + if options.ascents_win: + ttfont["OS/2"].usWinAscent = options.ascents_win + + if options.descents_hhea: + ttfont["hhea"].descent = options.descents_hhea + if options.descents_typo: + ttfont["OS/2"].sTypoDescender = options.descents_typo + if options.descents_win: + ttfont["OS/2"].usWinDescent = abs(options.descents_win) + + if options.linegaps_hhea or options.linegaps_hhea == 0: + ttfont["hhea"].lineGap = options.linegaps_hhea + if options.linegaps_typo or options.linegaps_typo == 0: + ttfont["OS/2"].sTypoLineGap = options.linegaps_typo + + ttfont.save(f[:-4] + ".fix.ttf") + + elif options.autofix: + ttFonts = [] + for f in fonts: + try: + ttFonts.append(ttLib.TTFont(f)) + except TTLibError as ex: + print("Error: {0}: {1}".format(f, ex)) + continue + + v_metrics = vmetrics(ttFonts) + for ttfont in ttFonts: + ttfont["hhea"].ascent = v_metrics["ymax"] + ttfont["OS/2"].sTypoAscender = v_metrics["ymax"] + ttfont["OS/2"].usWinAscent = v_metrics["ymax"] + + ttfont["hhea"].descent = v_metrics["ymin"] + 
ttfont["OS/2"].sTypoDescender = v_metrics["ymin"] + ttfont["OS/2"].usWinDescent = abs(v_metrics["ymin"]) + + ttfont.save(ttfont.reader.file.name[:-4] + ".fix.ttf") + + else: + entries = [ + ("hhea", "ascent"), + ("OS/2", "sTypoAscender"), + ("OS/2", "usWinAscent"), + ("hhea", "descent"), + ("OS/2", "sTypoDescender"), + ("OS/2", "usWinDescent"), + ("hhea", "lineGap"), + ("OS/2", "sTypoLineGap"), + ] + + for f in fonts: + ttfont = ttLib.TTFont(f) + print("## {}".format(f)) + for table, field in entries: + print("{} {}: {}".format(table, field, getattr(ttfont[table], field))) + print() + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/fix_weightclass.py b/Lib/gftools/scripts/fix_weightclass.py index f550c0d7..f8f46821 100755 --- a/Lib/gftools/scripts/fix_weightclass.py +++ b/Lib/gftools/scripts/fix_weightclass.py @@ -26,9 +26,9 @@ def main(font_path): FontFixer(font_path, verbose=True, fixes=[fix_weight_class]).fix() + if __name__ == "__main__": if len(sys.argv) != 2: print("Please include a path to a font") else: main(sys.argv[1]) - diff --git a/Lib/gftools/scripts/font_dependencies.py b/Lib/gftools/scripts/font_dependencies.py index 315605ff..d6d18db8 100644 --- a/Lib/gftools/scripts/font_dependencies.py +++ b/Lib/gftools/scripts/font_dependencies.py @@ -16,10 +16,7 @@ gftools font-dependencies read font.ttf -o requirements.txt """ from argparse import ArgumentParser -from gftools.builder.dependencies import ( - write_font_requirements, - read_font_requirements -) +from gftools.builder.dependencies import write_font_requirements, read_font_requirements from fontTools.ttLib import TTFont from fontTools.misc.cliTools import makeOutputFileName @@ -65,9 +62,7 @@ def main(args=None): try: requirements = read_font_requirements(ttfont) except KeyError: - parser.error( - "Font doesn't contain dependencies" - ) + parser.error("Font doesn't contain dependencies") if args.out: with open(args.out, "w") as doc: doc.write(requirements) diff --git a/Lib/gftools/scripts/font_diff.py b/Lib/gftools/scripts/font_diff.py index 0062c787..9025fc7b 100755 --- a/Lib/gftools/scripts/font_diff.py +++ b/Lib/gftools/scripts/font_diff.py @@ -22,8 +22,10 @@ def main(args=None): - print("This code has been deprecated; use gftools-compare-font or diffenator2 instead") + print( + "This code has been deprecated; use gftools-compare-font or diffenator2 instead" + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/font_tags.py b/Lib/gftools/scripts/font_tags.py index 8f2a2798..36d6a7e6 100644 --- a/Lib/gftools/scripts/font_tags.py +++ b/Lib/gftools/scripts/font_tags.py @@ -40,7 +40,9 @@ def main(args=None): args = parser.parse_args(args) if not is_google_fonts_repo(args.gf_path): - raise ValueError(f"'{args.gf_path.absolute()}' is not a path to a valid google/fonts repo") + raise ValueError( + f"'{args.gf_path.absolute()}' is not a path to a valid google/fonts repo" + ) gf_tags = GFTags() diff --git a/Lib/gftools/scripts/font_weights_coverage.py b/Lib/gftools/scripts/font_weights_coverage.py index 98b56750..618bcbd1 100755 --- a/Lib/gftools/scripts/font_weights_coverage.py +++ b/Lib/gftools/scripts/font_weights_coverage.py @@ -34,37 +34,37 @@ from gfsubsets import CodepointsInFont -parser = argparse.ArgumentParser(description='Compare size and coverage of two fonts') -parser.add_argument('dirpath', help="a directory containing font files.", metavar="DIR") +parser = argparse.ArgumentParser(description="Compare size and coverage of two fonts") 
+parser.add_argument("dirpath", help="a directory containing font files.", metavar="DIR") + def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - cps = set() - for f in _GetFontFiles(args.dirpath): - cps.update(CodepointsInFont(os.path.join(args.dirpath, f))) + cps = set() + for f in _GetFontFiles(args.dirpath): + cps.update(CodepointsInFont(os.path.join(args.dirpath, f))) - for f in _GetFontFiles(args.dirpath): - diff = cps - CodepointsInFont(os.path.join(args.dirpath, f)) - if bool(diff): - print('%s failed' % (f)) - for c in diff: - print('0x%04X' % (c)) - else: - print('%s passed' % (f)) + for f in _GetFontFiles(args.dirpath): + diff = cps - CodepointsInFont(os.path.join(args.dirpath, f)) + if bool(diff): + print("%s failed" % (f)) + for c in diff: + print("0x%04X" % (c)) + else: + print("%s passed" % (f)) def _GetFontFiles(path): - """Returns list of font files in a path. + """Returns list of font files in a path. - Args: - path: directory path - Returns: - Set of font files - """ - return [f for f in listdir(path) - if os.path.splitext(f)[1] in ('.ttf', '.otf')] + Args: + path: directory path + Returns: + Set of font files + """ + return [f for f in listdir(path) if os.path.splitext(f)[1] in (".ttf", ".otf")] -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/gen_push_lists.py b/Lib/gftools/scripts/gen_push_lists.py index 9106d9e2..1e49310b 100755 --- a/Lib/gftools/scripts/gen_push_lists.py +++ b/Lib/gftools/scripts/gen_push_lists.py @@ -27,6 +27,7 @@ from contextlib import contextmanager import pygit2 + @contextmanager def in_google_fonts_repo(gf_path): cwd = os.getcwd() @@ -73,7 +74,7 @@ def main(args=None): tags = GFTags() tags.to_csv(gf_path / "tags" / "all" / "families.csv") repo = pygit2.Repository(str(gf_path)) - if any("tags/all/families.csv" in d.delta.new_file.path for d in repo.diff()): + if any("tags/all/families.csv" in d.delta.new_file.path for d in repo.diff()): with open(to_sandbox_fp, "r", encoding="utf-8") as doc: string = doc.read() string += "\n# Tags\ntags/all/families.csv\n" diff --git a/Lib/gftools/scripts/lang_sample_text.py b/Lib/gftools/scripts/lang_sample_text.py index 57deb50b..be16c74d 100755 --- a/Lib/gftools/scripts/lang_sample_text.py +++ b/Lib/gftools/scripts/lang_sample_text.py @@ -13,8 +13,7 @@ from absl import app from absl import flags -from gflanguages import (LoadLanguages, - LoadRegions) +from gflanguages import LoadLanguages, LoadRegions from gftools import fonts_public_pb2 from gftools.util.udhr import Udhr from google.protobuf import text_format @@ -26,213 +25,272 @@ import yaml FLAGS = flags.FLAGS -flags.DEFINE_string('lang', None, 'Path to lang metadata package', short_name='l') -flags.DEFINE_string('udhrs', None, 'Path to UDHR translations (XML)', short_name='u') -flags.DEFINE_string('samples', None, 'Path to per-family samples from noto-data-dev repo', short_name='s') +flags.DEFINE_string("lang", None, "Path to lang metadata package", short_name="l") +flags.DEFINE_string("udhrs", None, "Path to UDHR translations (XML)", short_name="u") +flags.DEFINE_string( + "samples", + None, + "Path to per-family samples from noto-data-dev repo", + short_name="s", +) + def _ReadProto(proto, path): - with open(path, 'r', encoding='utf-8') as f: - proto = text_format.Parse(f.read(), proto) - return proto + with open(path, "r", encoding="utf-8") as f: + proto = text_format.Parse(f.read(), proto) + return proto -def _WriteProto(proto, path, comments = None): - with 
open(path, 'w', newline='') as f: - textproto = text_format.MessageToString(proto, as_utf8=True) - if comments is not None: - lines = [s if s not in comments else s + ' # ' + comments[s] for s in textproto.split('\n')] - textproto = '\n'.join(lines) - f.write(textproto) +def _WriteProto(proto, path, comments=None): + with open(path, "w", newline="") as f: + textproto = text_format.MessageToString(proto, as_utf8=True) + if comments is not None: + lines = [ + s if s not in comments else s + " # " + comments[s] + for s in textproto.split("\n") + ] + textproto = "\n".join(lines) + f.write(textproto) def _GetLanguageForUdhr(languages, udhr): - for l in languages.values(): - if (l.language == udhr.iso639_3 and l.script == udhr.iso15924) or \ - l.id == udhr.bcp47: - return l + for l in languages.values(): + if ( + l.language == udhr.iso639_3 and l.script == udhr.iso15924 + ) or l.id == udhr.bcp47: + return l - language = fonts_public_pb2.LanguageProto() - language.id = udhr.bcp47 - language.language = udhr.bcp47.split('_')[0] - language.script = udhr.iso15924 - language.name = udhr.name.replace(' (', ', ').replace(')', '') - return language + language = fonts_public_pb2.LanguageProto() + language.id = udhr.bcp47 + language.language = udhr.bcp47.split("_")[0] + language.script = udhr.iso15924 + language.name = udhr.name.replace(" (", ", ").replace(")", "") + return language def _ReplaceInSampleText(languages): - for l in languages.values(): - if l.script == 'Latn' or not l.HasField('sample_text'): - continue - if '-' in l.sample_text.masthead_full: - l.sample_text.masthead_full = l.sample_text.masthead_full.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.masthead_partial: - l.sample_text.masthead_partial = l.sample_text.masthead_partial.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.styles: - l.sample_text.styles = l.sample_text.styles.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.tester: - l.sample_text.tester = l.sample_text.tester.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.poster_sm: - l.sample_text.poster_sm = l.sample_text.poster_sm.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.poster_md: - l.sample_text.poster_md = l.sample_text.poster_md.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.poster_lg: - l.sample_text.poster_lg = l.sample_text.poster_lg.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.specimen_48: - l.sample_text.specimen_48 = l.sample_text.specimen_48.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.specimen_36: - l.sample_text.specimen_36 = l.sample_text.specimen_36.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.specimen_32: - l.sample_text.specimen_32 = l.sample_text.specimen_32.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.specimen_21: - l.sample_text.specimen_21 = l.sample_text.specimen_21.replace(' - ', ' ').replace('-', '').strip() - if '-' in l.sample_text.specimen_16: - l.sample_text.specimen_16 = l.sample_text.specimen_16.replace(' - ', ' ').replace('-', '').strip() - - _WriteProto(l, os.path.join(FLAGS.lang, 'languages', l.id + '.textproto')) + for l in languages.values(): + if l.script == "Latn" or not l.HasField("sample_text"): + continue + if "-" in l.sample_text.masthead_full: + l.sample_text.masthead_full = ( + l.sample_text.masthead_full.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in 
l.sample_text.masthead_partial: + l.sample_text.masthead_partial = ( + l.sample_text.masthead_partial.replace(" - ", " ") + .replace("-", "") + .strip() + ) + if "-" in l.sample_text.styles: + l.sample_text.styles = ( + l.sample_text.styles.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.tester: + l.sample_text.tester = ( + l.sample_text.tester.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.poster_sm: + l.sample_text.poster_sm = ( + l.sample_text.poster_sm.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.poster_md: + l.sample_text.poster_md = ( + l.sample_text.poster_md.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.poster_lg: + l.sample_text.poster_lg = ( + l.sample_text.poster_lg.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.specimen_48: + l.sample_text.specimen_48 = ( + l.sample_text.specimen_48.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.specimen_36: + l.sample_text.specimen_36 = ( + l.sample_text.specimen_36.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.specimen_32: + l.sample_text.specimen_32 = ( + l.sample_text.specimen_32.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.specimen_21: + l.sample_text.specimen_21 = ( + l.sample_text.specimen_21.replace(" - ", " ").replace("-", "").strip() + ) + if "-" in l.sample_text.specimen_16: + l.sample_text.specimen_16 = ( + l.sample_text.specimen_16.replace(" - ", " ").replace("-", "").strip() + ) + + _WriteProto(l, os.path.join(FLAGS.lang, "languages", l.id + ".textproto")) def main(argv): - languages = LoadLanguages(base_dir=FLAGS.lang) - regions = LoadRegions(base_dir=FLAGS.lang) - - if FLAGS.samples: - assert len(argv) > 1, 'No METADATA.pb files specified' - line_to_lang_name = {} - for l in languages: - line = 'languages: "{code}"'.format(code=languages[l].id) - line_to_lang_name[line] = languages[l].name - samples = {} - for sample_filename in os.listdir(FLAGS.samples): - key = os.path.splitext(os.path.basename(sample_filename))[0] - samples[key] = os.path.join(FLAGS.samples, sample_filename) - for path in argv[1:]: - family = _ReadProto(fonts_public_pb2.FamilyProto(), path) - if True:#len(family.languages) == 0 or family.name == 'Noto Sans Tamil Supplement': - key = family.name.replace(' ', '') - if key not in samples: - print('Family not found in samples: ' + family.name) - continue - with open(samples[key], 'r') as f: - sample_data = yaml.safe_load(f) - sample_text = fonts_public_pb2.SampleTextProto() - sample_text.masthead_full = sample_data['masthead_full'] - sample_text.masthead_partial = sample_data['masthead_partial'] - sample_text.styles = sample_data['styles'] - sample_text.tester = sample_data['tester'] - sample_text.poster_sm = sample_data['poster_sm'] - sample_text.poster_md = sample_data['poster_md'] - sample_text.poster_lg = sample_data['poster_lg'] - family.sample_text.MergeFrom(sample_text) - _WriteProto(family, path, comments=line_to_lang_name) - - if not FLAGS.udhrs: - return - - if FLAGS.udhrs.endswith('.yaml'): - with open(FLAGS.udhrs, 'r') as f: - data = yaml.safe_load(f) - for translation, meta in data.items(): - if 'lang_full' not in meta or meta['lang_full'] not in ['ccp-Beng-IN', 'lad-Hebr-IL']: - continue - language = meta['lang'] - if language.startswith('und-'): - continue - script = re.search(r'.*-(.*)-.*', meta['lang_full']).group(1) if 'script' not in meta else meta['script'] - key = 
language + '_' + script - iso639_3 = meta['lang_639_3'] - iso15924 = script - name = meta['name_lang'] if 'name_udhr' not in meta else meta['name_udhr'] - udhr = Udhr( - key=key, - iso639_3=iso639_3, - iso15924=iso15924, - bcp47=key, - direction=None, - ohchr=None, - stage=4, - loc=None, - name=name - ) - udhr.LoadArticleOne(translation) - - language = _GetLanguageForUdhr(languages, udhr) - if not language.HasField('sample_text'): - language.sample_text.MergeFrom(udhr.GetSampleTexts()) - if 'name_autonym' in meta and not language.HasField('autonym'): - language.autonym = meta['name_autonym'].strip() - _WriteProto(language, os.path.join(FLAGS.lang, 'languages', language.id + '.textproto')) - - elif FLAGS.udhrs.endswith('.csv'): - with open(FLAGS.udhrs, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',', quotechar='"') - head = next(reader) - index_id = head.index('id') - index_name = head.index('language') - index_historical = head.index('historical') - index_sample = head.index('SAMPLE') - for row in reader: - id = row[index_id] - if id in languages: - language = languages[row[index_id]] - else: - language = fonts_public_pb2.LanguageProto() - language.id = id - language.language, language.script = id.split('_') - language.name = row[index_name] - historical = row[index_historical] == 'X' - if language.historical != historical: - if historical: - language.historical = True - else: - language.ClearField('historical') - sample = row[index_sample] - if sample and not sample.startswith('http'): - udhr = Udhr( - key=id, - iso639_3=language.language, - iso15924=language.script, - bcp47=id, - direction=None, - ohchr=None, - stage=4, - loc=None, - name=None - ) - udhr.LoadArticleOne(sample) - if not language.HasField('sample_text'): + languages = LoadLanguages(base_dir=FLAGS.lang) + regions = LoadRegions(base_dir=FLAGS.lang) + + if FLAGS.samples: + assert len(argv) > 1, "No METADATA.pb files specified" + line_to_lang_name = {} + for l in languages: + line = 'languages: "{code}"'.format(code=languages[l].id) + line_to_lang_name[line] = languages[l].name + samples = {} + for sample_filename in os.listdir(FLAGS.samples): + key = os.path.splitext(os.path.basename(sample_filename))[0] + samples[key] = os.path.join(FLAGS.samples, sample_filename) + for path in argv[1:]: + family = _ReadProto(fonts_public_pb2.FamilyProto(), path) + if ( + True + ): # len(family.languages) == 0 or family.name == 'Noto Sans Tamil Supplement': + key = family.name.replace(" ", "") + if key not in samples: + print("Family not found in samples: " + family.name) + continue + with open(samples[key], "r") as f: + sample_data = yaml.safe_load(f) + sample_text = fonts_public_pb2.SampleTextProto() + sample_text.masthead_full = sample_data["masthead_full"] + sample_text.masthead_partial = sample_data["masthead_partial"] + sample_text.styles = sample_data["styles"] + sample_text.tester = sample_data["tester"] + sample_text.poster_sm = sample_data["poster_sm"] + sample_text.poster_md = sample_data["poster_md"] + sample_text.poster_lg = sample_data["poster_lg"] + family.sample_text.MergeFrom(sample_text) + _WriteProto(family, path, comments=line_to_lang_name) + + if not FLAGS.udhrs: + return + + if FLAGS.udhrs.endswith(".yaml"): + with open(FLAGS.udhrs, "r") as f: + data = yaml.safe_load(f) + for translation, meta in data.items(): + if "lang_full" not in meta or meta["lang_full"] not in [ + "ccp-Beng-IN", + "lad-Hebr-IL", + ]: + continue + language = meta["lang"] + if language.startswith("und-"): + continue + script = ( 
+ re.search(r".*-(.*)-.*", meta["lang_full"]).group(1) + if "script" not in meta + else meta["script"] + ) + key = language + "_" + script + iso639_3 = meta["lang_639_3"] + iso15924 = script + name = ( + meta["name_lang"] if "name_udhr" not in meta else meta["name_udhr"] + ) + udhr = Udhr( + key=key, + iso639_3=iso639_3, + iso15924=iso15924, + bcp47=key, + direction=None, + ohchr=None, + stage=4, + loc=None, + name=name, + ) + udhr.LoadArticleOne(translation) + + language = _GetLanguageForUdhr(languages, udhr) + if not language.HasField("sample_text"): + language.sample_text.MergeFrom(udhr.GetSampleTexts()) + if "name_autonym" in meta and not language.HasField("autonym"): + language.autonym = meta["name_autonym"].strip() + _WriteProto( + language, + os.path.join(FLAGS.lang, "languages", language.id + ".textproto"), + ) + + elif FLAGS.udhrs.endswith(".csv"): + with open(FLAGS.udhrs, newline="") as csvfile: + reader = csv.reader(csvfile, delimiter=",", quotechar='"') + head = next(reader) + index_id = head.index("id") + index_name = head.index("language") + index_historical = head.index("historical") + index_sample = head.index("SAMPLE") + for row in reader: + id = row[index_id] + if id in languages: + language = languages[row[index_id]] + else: + language = fonts_public_pb2.LanguageProto() + language.id = id + language.language, language.script = id.split("_") + language.name = row[index_name] + historical = row[index_historical] == "X" + if language.historical != historical: + if historical: + language.historical = True + else: + language.ClearField("historical") + sample = row[index_sample] + if sample and not sample.startswith("http"): + udhr = Udhr( + key=id, + iso639_3=language.language, + iso15924=language.script, + bcp47=id, + direction=None, + ohchr=None, + stage=4, + loc=None, + name=None, + ) + udhr.LoadArticleOne(sample) + if not language.HasField("sample_text"): + language.sample_text.MergeFrom(udhr.GetSampleTexts()) + _WriteProto( + language, + os.path.join(FLAGS.lang, "languages", language.id + ".textproto"), + ) + + elif os.path.isdir(FLAGS.udhrs): + for udhr_path in glob.glob(os.path.join(FLAGS.udhrs, "*")): + if udhr_path.endswith("index.xml") or os.path.basename( + udhr_path + ).startswith("status"): + continue + udhr_data = etree.parse(udhr_path) + head = udhr_data.getroot() + for name, value in head.attrib.items(): + if re.search(r"\{.*\}lang", name): + bcp47 = value.replace("-", "_") + udhr = Udhr( + key=head.get("key"), + iso639_3=head.get("iso639-3"), + iso15924=head.get("iso15924"), + bcp47=bcp47, + direction=head.get("dir"), + ohchr=None, + stage=4, + loc=None, + name=head.get("n"), + ) + udhr.Parse(udhr_data) + + language = _GetLanguageForUdhr(languages, udhr) + if language.id in languages or language.HasField("sample_text"): + continue language.sample_text.MergeFrom(udhr.GetSampleTexts()) - _WriteProto(language, os.path.join(FLAGS.lang, 'languages', language.id + '.textproto')) - - elif os.path.isdir(FLAGS.udhrs): - for udhr_path in glob.glob(os.path.join(FLAGS.udhrs, '*')): - if udhr_path.endswith('index.xml') or os.path.basename(udhr_path).startswith('status'): - continue - udhr_data = etree.parse(udhr_path) - head = udhr_data.getroot() - for name, value in head.attrib.items(): - if re.search(r'\{.*\}lang', name): - bcp47 = value.replace('-', '_') - udhr = Udhr( - key=head.get('key'), - iso639_3=head.get('iso639-3'), - iso15924=head.get('iso15924'), - bcp47=bcp47, - direction=head.get('dir'), - ohchr=None, - stage=4, - loc=None, - name=head.get('n')) - 
udhr.Parse(udhr_data) - - language = _GetLanguageForUdhr(languages, udhr) - if language.id in languages or language.HasField('sample_text'): - continue - language.sample_text.MergeFrom(udhr.GetSampleTexts()) - _WriteProto(language, os.path.join(FLAGS.lang, 'languages', language.id + '.textproto')) - - else: - raise Exception('Unsupported input type for --udhrs: ' + FLAGS.udhrs) - - -if __name__ == '__main__': - app.run(main) + _WriteProto( + language, + os.path.join(FLAGS.lang, "languages", language.id + ".textproto"), + ) + + else: + raise Exception("Unsupported input type for --udhrs: " + FLAGS.udhrs) + + +if __name__ == "__main__": + app.run(main) diff --git a/Lib/gftools/scripts/lang_support.py b/Lib/gftools/scripts/lang_support.py index bca41253..657c62b4 100755 --- a/Lib/gftools/scripts/lang_support.py +++ b/Lib/gftools/scripts/lang_support.py @@ -16,8 +16,7 @@ import argparse from fontTools.ttLib import TTFont -from gflanguages import (LoadLanguages, - LoadScripts) +from gflanguages import LoadLanguages, LoadScripts from gftools import fonts_public_pb2 from gftools.util import google_fonts as fonts from google.protobuf import text_format @@ -26,145 +25,218 @@ import os from pkg_resources import resource_filename -parser = argparse.ArgumentParser(description='Add language support metadata to METADATA.pb files') -parser.add_argument('--lang', '-l', help='Path to lang metadata package') -parser.add_argument('--report', '-r', action="store_true", help='Whether to output a report of lang metadata insights') -parser.add_argument('--sample_text_audit', '-s', action="store_true", help='Whether to run the sample text audit') -parser.add_argument('--out', '-o', help='Path to output directory for report') -parser.add_argument('metadata_files', help='Path to METADATA.pb files', nargs="+") +parser = argparse.ArgumentParser( + description="Add language support metadata to METADATA.pb files" +) +parser.add_argument("--lang", "-l", help="Path to lang metadata package") +parser.add_argument( + "--report", + "-r", + action="store_true", + help="Whether to output a report of lang metadata insights", +) +parser.add_argument( + "--sample_text_audit", + "-s", + action="store_true", + help="Whether to run the sample text audit", +) +parser.add_argument("--out", "-o", help="Path to output directory for report") +parser.add_argument("metadata_files", help="Path to METADATA.pb files", nargs="+") def _WriteCsv(path, rows): - with open(path, 'w', newline='') as csvfile: - writer = csv.writer(csvfile, delimiter='\t', quotechar='"', - quoting=csv.QUOTE_MINIMAL) - for row in rows: - writer.writerow(row) + with open(path, "w", newline="") as csvfile: + writer = csv.writer( + csvfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + for row in rows: + writer.writerow(row) def _WriteReport(metadata_paths, out_dir, languages): - rows = [[ 'id', 'name', 'lang', 'script', 'population', 'ec_base', 'ec_auxiliary', - 'ec_marks', 'ec_numerals', 'ec_punctuation', 'ec_index', 'st_fallback', - 'st_fallback_name', 'st_masthead_full', 'st_masthead_partial', - 'st_styles', 'st_tester', 'st_poster_sm', 'st_poster_md', - 'st_poster_lg', 'st_specimen_48', 'st_specimen_36', 'st_specimen_32', - 'st_specimen_21', 'st_specimen_16']] - - without_lang = [] - without_sample_text = [] - supported_without_sample_text = {} - for metadata_path in metadata_paths: - family = fonts.ReadProto(fonts_public_pb2.FamilyProto(), metadata_path) - if len(family.languages) == 0: - without_lang.append(family.name) - else: - 
supports_lang_with_sample_text = False - for lang_code in family.languages: - if languages[lang_code].HasField('sample_text'): - supports_lang_with_sample_text = True - break - if not supports_lang_with_sample_text: - without_sample_text.append(family.name) - for l in family.languages: - if not languages[l].HasField('sample_text') and l not in supported_without_sample_text: - supported_without_sample_text[l] = languages[l] - - for lang in supported_without_sample_text.values(): - rows.append([lang.id, lang.name, lang.language, lang.script, lang.population]) - - path = os.path.join(out_dir, 'support.csv') - _WriteCsv(path, rows) - - -def _SampleTextAudit(out_dir, languages, scripts, unused_scripts=[]): - rows = [['id','language','script','has_sample_text','historical']] - # sort by script|has_sample_text|historical|id - entries = [] - - min_sample_text_languages = 0 - by_script = {} - for l in languages.values(): - if l.script not in by_script: - by_script[l.script] = [] - by_script[l.script].append(l) - for script in by_script: - if script in unused_scripts: - continue - languages_with_sample_text = {l.id for l in by_script[script] if l.HasField('sample_text') and not l.sample_text.HasField('fallback_language')} - non_historical_languages_without_sample_text = [l for l in by_script[script] if not l.historical and l.id not in languages_with_sample_text] - if len(languages_with_sample_text) < 2: - if len(languages_with_sample_text) == 1 and len(by_script[script]) > 1 and len(non_historical_languages_without_sample_text) > 1: - min_sample_text_languages += 1 - elif len(languages_with_sample_text) == 0: - if len(non_historical_languages_without_sample_text) > 1: - min_sample_text_languages += 2 + rows = [ + [ + "id", + "name", + "lang", + "script", + "population", + "ec_base", + "ec_auxiliary", + "ec_marks", + "ec_numerals", + "ec_punctuation", + "ec_index", + "st_fallback", + "st_fallback_name", + "st_masthead_full", + "st_masthead_partial", + "st_styles", + "st_tester", + "st_poster_sm", + "st_poster_md", + "st_poster_lg", + "st_specimen_48", + "st_specimen_36", + "st_specimen_32", + "st_specimen_21", + "st_specimen_16", + ] + ] + + without_lang = [] + without_sample_text = [] + supported_without_sample_text = {} + for metadata_path in metadata_paths: + family = fonts.ReadProto(fonts_public_pb2.FamilyProto(), metadata_path) + if len(family.languages) == 0: + without_lang.append(family.name) else: - min_sample_text_languages += 1 - - if len(languages_with_sample_text) == 0 or (len(languages_with_sample_text) == 1 and len([l for l in by_script[script] if not l.historical]) > 1 ): - for l in by_script[script]: - entries.append({ - 'id': l.id, - 'language': l.name, - 'script': scripts[l.script].name, - 'has_sample_text': l.id in languages_with_sample_text, - 'historical': l.historical, - }) - - print(min_sample_text_languages) + supports_lang_with_sample_text = False + for lang_code in family.languages: + if languages[lang_code].HasField("sample_text"): + supports_lang_with_sample_text = True + break + if not supports_lang_with_sample_text: + without_sample_text.append(family.name) + for l in family.languages: + if ( + not languages[l].HasField("sample_text") + and l not in supported_without_sample_text + ): + supported_without_sample_text[l] = languages[l] + + for lang in supported_without_sample_text.values(): + rows.append([lang.id, lang.name, lang.language, lang.script, lang.population]) + + path = os.path.join(out_dir, "support.csv") + _WriteCsv(path, rows) - last_script = None - 
entries.sort(key = lambda x: (x['script'], not x['has_sample_text'], not x['historical'], x['id'])) - for e in entries: - if last_script is not None and e['script'] != last_script: - rows.append([]) - rows.append([e['id'], e['language'], e['script'], 'X' if e['has_sample_text'] else '', 'X' if e['historical'] else '']) - last_script = e['script'] - path = os.path.join(out_dir, 'sample_text_audit.csv') - _WriteCsv(path, rows) +def _SampleTextAudit(out_dir, languages, scripts, unused_scripts=[]): + rows = [["id", "language", "script", "has_sample_text", "historical"]] + # sort by script|has_sample_text|historical|id + entries = [] + + min_sample_text_languages = 0 + by_script = {} + for l in languages.values(): + if l.script not in by_script: + by_script[l.script] = [] + by_script[l.script].append(l) + for script in by_script: + if script in unused_scripts: + continue + languages_with_sample_text = { + l.id + for l in by_script[script] + if l.HasField("sample_text") + and not l.sample_text.HasField("fallback_language") + } + non_historical_languages_without_sample_text = [ + l + for l in by_script[script] + if not l.historical and l.id not in languages_with_sample_text + ] + if len(languages_with_sample_text) < 2: + if ( + len(languages_with_sample_text) == 1 + and len(by_script[script]) > 1 + and len(non_historical_languages_without_sample_text) > 1 + ): + min_sample_text_languages += 1 + elif len(languages_with_sample_text) == 0: + if len(non_historical_languages_without_sample_text) > 1: + min_sample_text_languages += 2 + else: + min_sample_text_languages += 1 + + if len(languages_with_sample_text) == 0 or ( + len(languages_with_sample_text) == 1 + and len([l for l in by_script[script] if not l.historical]) > 1 + ): + for l in by_script[script]: + entries.append( + { + "id": l.id, + "language": l.name, + "script": scripts[l.script].name, + "has_sample_text": l.id in languages_with_sample_text, + "historical": l.historical, + } + ) + + print(min_sample_text_languages) + + last_script = None + entries.sort( + key=lambda x: ( + x["script"], + not x["has_sample_text"], + not x["historical"], + x["id"], + ) + ) + for e in entries: + if last_script is not None and e["script"] != last_script: + rows.append([]) + rows.append( + [ + e["id"], + e["language"], + e["script"], + "X" if e["has_sample_text"] else "", + "X" if e["historical"] else "", + ] + ) + last_script = e["script"] + + path = os.path.join(out_dir, "sample_text_audit.csv") + _WriteCsv(path, rows) def main(args=None): - args = parser.parse_args(args) - languages = LoadLanguages(base_dir=args.lang) - scripts = LoadScripts(base_dir=args.lang) - - if args.report: - assert len(argv) > 1, 'No METADATA.pb files specified' - assert args.out is not None, 'No output dir specified (--out)' - print('Writing insights report...') - _WriteReport(argv[1:], args.out, languages) - elif args.sample_text_audit: - assert args.out is not None, 'No output dir specified (--out)' - print('Auditing sample text') - seen_scripts = set() - unused_scripts = set() - for path in argv[1:]: - family = fonts.ReadProto(fonts_public_pb2.FamilyProto(), path) - for l in family.languages: - seen_scripts.add(languages[l].script) - for s in scripts: - if s not in seen_scripts: - unused_scripts.add(s) - _SampleTextAudit(args.out, languages, scripts, unused_scripts) - else: - for path in args.metadata_files: - family_metadata = fonts.ReadProto(fonts_public_pb2.FamilyProto(), path) - if len(family_metadata.languages) > 0: - continue - exemplar_font_fp = os.path.join( - 
os.path.dirname(path), fonts.GetExemplarFont(family_metadata).filename - ) - exemplar_font = TTFont(exemplar_font_fp) - supported_languages = fonts.SupportedLanguages(exemplar_font, languages) - if family_metadata.HasField("is_noto") and family_metadata.is_noto: - supported_languages = [l for l in supported_languages if "Latn" not in l.id] - supported_languages = sorted([l.id for l in supported_languages]) - family_metadata.languages.extend(supported_languages) - fonts.WriteMetadata(family_metadata, path) - - - -if __name__ == '__main__': - main() + args = parser.parse_args(args) + languages = LoadLanguages(base_dir=args.lang) + scripts = LoadScripts(base_dir=args.lang) + + if args.report: + assert len(argv) > 1, "No METADATA.pb files specified" + assert args.out is not None, "No output dir specified (--out)" + print("Writing insights report...") + _WriteReport(argv[1:], args.out, languages) + elif args.sample_text_audit: + assert args.out is not None, "No output dir specified (--out)" + print("Auditing sample text") + seen_scripts = set() + unused_scripts = set() + for path in argv[1:]: + family = fonts.ReadProto(fonts_public_pb2.FamilyProto(), path) + for l in family.languages: + seen_scripts.add(languages[l].script) + for s in scripts: + if s not in seen_scripts: + unused_scripts.add(s) + _SampleTextAudit(args.out, languages, scripts, unused_scripts) + else: + for path in args.metadata_files: + family_metadata = fonts.ReadProto(fonts_public_pb2.FamilyProto(), path) + if len(family_metadata.languages) > 0: + continue + exemplar_font_fp = os.path.join( + os.path.dirname(path), fonts.GetExemplarFont(family_metadata).filename + ) + exemplar_font = TTFont(exemplar_font_fp) + supported_languages = fonts.SupportedLanguages(exemplar_font, languages) + if family_metadata.HasField("is_noto") and family_metadata.is_noto: + supported_languages = [ + l for l in supported_languages if "Latn" not in l.id + ] + supported_languages = sorted([l.id for l in supported_languages]) + family_metadata.languages.extend(supported_languages) + fonts.WriteMetadata(family_metadata, path) + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/list_italicangle.py b/Lib/gftools/scripts/list_italicangle.py index 5801e759..41848c42 100755 --- a/Lib/gftools/scripts/list_italicangle.py +++ b/Lib/gftools/scripts/list_italicangle.py @@ -19,23 +19,24 @@ import tabulate from fontTools import ttLib -parser = argparse.ArgumentParser( - description='Print out italicAngle of the fonts') -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true') +parser = argparse.ArgumentParser(description="Print out italicAngle of the fonts") +parser.add_argument("font", nargs="+") +parser.add_argument("--csv", default=False, action="store_true") + def main(args=None): arg = parser.parse_args(args) - headers = ['filename', 'italicAngle'] + headers = ["filename", "italicAngle"] rows = [] for font in arg.font: ttfont = ttLib.TTFont(font) - rows.append([os.path.basename(font), ttfont['post'].italicAngle]) + rows.append([os.path.basename(font), ttfont["post"].italicAngle]) if arg.csv: import csv import sys + writer = csv.writer(sys.stdout) writer.writerows([headers]) writer.writerows(rows) @@ -43,5 +44,5 @@ def main(args=None): print(tabulate.tabulate(rows, headers, tablefmt="pipe")) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/Lib/gftools/scripts/list_panose.py b/Lib/gftools/scripts/list_panose.py index 981aa843..77e153a7 100755 --- 
a/Lib/gftools/scripts/list_panose.py +++ b/Lib/gftools/scripts/list_panose.py @@ -19,30 +19,31 @@ import tabulate from fontTools import ttLib -parser = argparse.ArgumentParser(description='Print out Panose of the fonts') -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true') +parser = argparse.ArgumentParser(description="Print out Panose of the fonts") +parser.add_argument("font", nargs="+") +parser.add_argument("--csv", default=False, action="store_true") def main(args=None): args = parser.parse_args(args) - headers = ['filename'] + headers = ["filename"] rows = [] for i, font in enumerate(args.font): row = [os.path.basename(font)] ttfont = ttLib.TTFont(font) - for k in sorted(ttfont['OS/2'].panose.__dict__.keys()): + for k in sorted(ttfont["OS/2"].panose.__dict__.keys()): if i < 1: headers.append(k) - row.append(getattr(ttfont['OS/2'].panose, k, 0)) + row.append(getattr(ttfont["OS/2"].panose, k, 0)) rows.append(row) def as_csv(rows): import csv import sys + writer = csv.writer(sys.stdout) writer.writerows([headers]) writer.writerows(rows) @@ -53,6 +54,6 @@ def as_csv(rows): print(tabulate.tabulate(rows, headers, tablefmt="pipe")) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/list_weightclass.py b/Lib/gftools/scripts/list_weightclass.py index a0f9533e..99327e4f 100755 --- a/Lib/gftools/scripts/list_weightclass.py +++ b/Lib/gftools/scripts/list_weightclass.py @@ -19,33 +19,33 @@ import tabulate from fontTools import ttLib -parser = argparse.ArgumentParser(description='Print out' - ' usWeightClass of the fonts') -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true') +parser = argparse.ArgumentParser(description="Print out" " usWeightClass of the fonts") +parser.add_argument("font", nargs="+") +parser.add_argument("--csv", default=False, action="store_true") def main(args=None): - args = parser.parse_args(args) - headers = ['filename', 'usWeightClass'] - rows = [] - for font in args.font: - ttfont = ttLib.TTFont(font) - rows.append([os.path.basename(font), ttfont['OS/2'].usWeightClass]) + args = parser.parse_args(args) + headers = ["filename", "usWeightClass"] + rows = [] + for font in args.font: + ttfont = ttLib.TTFont(font) + rows.append([os.path.basename(font), ttfont["OS/2"].usWeightClass]) - def as_csv(rows): - import csv - import sys - writer = csv.writer(sys.stdout) - writer.writerows([headers]) - writer.writerows(rows) - sys.exit(0) + def as_csv(rows): + import csv + import sys - if args.csv: - as_csv(rows) + writer = csv.writer(sys.stdout) + writer.writerows([headers]) + writer.writerows(rows) + sys.exit(0) - print(tabulate.tabulate(rows, headers, tablefmt="pipe")) + if args.csv: + as_csv(rows) -if __name__ == '__main__': - main() + print(tabulate.tabulate(rows, headers, tablefmt="pipe")) + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/list_widthclass.py b/Lib/gftools/scripts/list_widthclass.py index f8c16191..9b4a43e6 100755 --- a/Lib/gftools/scripts/list_widthclass.py +++ b/Lib/gftools/scripts/list_widthclass.py @@ -22,20 +22,20 @@ import tabulate from fontTools import ttLib -parser = argparse.ArgumentParser(description='Print out' - ' usWidthClass of the fonts') -parser.add_argument('font', nargs="+") -parser.add_argument('--csv', default=False, action='store_true') -parser.add_argument('--set', type=int, default=0) -parser.add_argument('--autofix', default=False, action='store_true') +parser = 
argparse.ArgumentParser(description="Print out" " usWidthClass of the fonts") +parser.add_argument("font", nargs="+") +parser.add_argument("--csv", default=False, action="store_true") +parser.add_argument("--set", type=int, default=0) +parser.add_argument("--autofix", default=False, action="store_true") + def print_info(fonts, print_csv=False): - headers = ['filename', 'usWidthClass'] + headers = ["filename", "usWidthClass"] rows = [] warnings = [] for font in fonts: ttfont = ttLib.TTFont(font) - usWidthClass = ttfont['OS/2'].usWidthClass + usWidthClass = ttfont["OS/2"].usWidthClass rows.append([os.path.basename(font), usWidthClass]) if usWidthClass != 5: warning = "WARNING: {} is {}, expected 5" @@ -79,7 +79,7 @@ def getFromFilename(filename): def fix(fonts, value=None): rows = [] - headers = ['filename', 'usWidthClass was', 'usWidthClass now'] + headers = ["filename", "usWidthClass was", "usWidthClass now"] for font in fonts: row = [font] @@ -88,10 +88,10 @@ def fix(fonts, value=None): usWidthClass = getFromFilename(font) else: usWidthClass = value - row.append(ttfont['OS/2'].usWidthClass) - ttfont['OS/2'].usWidthClass = usWidthClass - row.append(ttfont['OS/2'].usWidthClass) - ttfont.save(font + '.fix') + row.append(ttfont["OS/2"].usWidthClass) + ttfont["OS/2"].usWidthClass = usWidthClass + row.append(ttfont["OS/2"].usWidthClass) + ttfont.save(font + ".fix") rows.append(row) if rows: @@ -108,6 +108,6 @@ def main(args=None): sys.exit(0) print_info(args.font, print_csv=args.csv) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/manage_traffic_jam.py b/Lib/gftools/scripts/manage_traffic_jam.py index 0d6bc3c2..ec694288 100644 --- a/Lib/gftools/scripts/manage_traffic_jam.py +++ b/Lib/gftools/scripts/manage_traffic_jam.py @@ -75,7 +75,9 @@ def __exit__(self, exception_type, exception_value, exception_traceback): self.git_checkout_main() def user_input(self, item: PushItem): - input_text = "Bump pushlist: [y/n], block: [b] skip pr: [s], inspect: [i], quit: [q]?: " + input_text = ( + "Bump pushlist: [y/n], block: [b] skip pr: [s], inspect: [i], quit: [q]?: " + ) user_input = input(input_text) if "*" in user_input: @@ -284,7 +286,9 @@ def main(args=None): traffic_jam_data = (Path("~") / ".gf_traffic_jam_data.json").expanduser() push_items = PushItems.from_traffic_jam(traffic_jam_data) - push_items.sort(key=lambda x: x.category in [PushCategory.NEW, PushCategory.UPGRADE]) + push_items.sort( + key=lambda x: x.category in [PushCategory.NEW, PushCategory.UPGRADE] + ) if not args.show_open_prs: push_items = PushItems(i for i in push_items if i.merged == True) if "lists" in args.filter: diff --git a/Lib/gftools/scripts/metadata_vs_api.py b/Lib/gftools/scripts/metadata_vs_api.py index e11132a1..69c811d4 100755 --- a/Lib/gftools/scripts/metadata_vs_api.py +++ b/Lib/gftools/scripts/metadata_vs_api.py @@ -21,6 +21,7 @@ import os import sys import requests + if int(sys.version[0]) == 2: import urlparse elif int(sys.version[0]) == 3: @@ -28,40 +29,44 @@ from gftools.fonts_public_pb2 import FamilyProto from google.protobuf import text_format -description = ("This script compares the info on local METADATA.pb files" - " with data fetched from the Google Fonts Developer API.\n\n" - " In order to use it you need to provide an API key.") +description = ( + "This script compares the info on local METADATA.pb files" + " with data fetched from the Google Fonts Developer API.\n\n" + " In order to use it you need to provide an API key." 
+) parser = argparse.ArgumentParser(description=description) -parser.add_argument('key', help='Key from Google Fonts Developer API') -parser.add_argument('repo', - help=('Directory tree that contains' - ' directories with METADATA.pb files.')) -parser.add_argument('--cache', - help=('Directory to store a copy' - ' of the files in the fonts developer API.'), - default="/tmp/gftools-compare-git-api") -parser.add_argument('--verbose', - help='Print additional information', - action="store_true") -parser.add_argument('--ignore-copy-existing-ttf', action="store_true") -parser.add_argument('--autofix', - help='Apply automatic fixes to files.', - action="store_true") -parser.add_argument('--api', - help='Domain string to use to request.', - default="fonts.googleapis.com") +parser.add_argument("key", help="Key from Google Fonts Developer API") +parser.add_argument( + "repo", help=("Directory tree that contains" " directories with METADATA.pb files.") +) +parser.add_argument( + "--cache", + help=("Directory to store a copy" " of the files in the fonts developer API."), + default="/tmp/gftools-compare-git-api", +) +parser.add_argument( + "--verbose", help="Print additional information", action="store_true" +) +parser.add_argument("--ignore-copy-existing-ttf", action="store_true") +parser.add_argument( + "--autofix", help="Apply automatic fixes to files.", action="store_true" +) +parser.add_argument( + "--api", help="Domain string to use to request.", default="fonts.googleapis.com" +) def get_cache_font_path(cache_dir, fonturl): urlparts = urlparse.urlparse(fonturl) - cache_dir = os.path.join(cache_dir, - urlparts.netloc, - os.path.dirname(urlparts.path).strip('/')) + cache_dir = os.path.join( + cache_dir, urlparts.netloc, os.path.dirname(urlparts.path).strip("/") + ) if not os.path.exists(cache_dir): os.makedirs(cache_dir) fontname = os.path.basename(fonturl) return os.path.join(cache_dir, fontname) + def getVariantName(item): if item.style == "normal" and item.weight == 400: return "regular" @@ -76,19 +81,22 @@ def getVariantName(item): return name -API_URL = 'https://www.googleapis.com/webfonts/v1/webfonts?key={}' +API_URL = "https://www.googleapis.com/webfonts/v1/webfonts?key={}" + + def main(args=None): args = parser.parse_args(args) response = requests.get(API_URL.format(args.key)) try: - webfontList = response.json()['items'] - webfontListFamilyNames = [item['family'] for item in webfontList] + webfontList = response.json()["items"] + webfontListFamilyNames = [item["family"] for item in webfontList] except (ValueError, KeyError): - sys.exit("Unable to load and parse" - " list of families from Google Web Fonts API.") + sys.exit( + "Unable to load and parse" " list of families from Google Web Fonts API." 
+ ) for dirpath, dirnames, filenames in os.walk(args.repo): - metadata_path = os.path.join(dirpath, 'METADATA.pb') + metadata_path = os.path.join(dirpath, "METADATA.pb") if not os.path.exists(metadata_path): continue @@ -98,131 +106,162 @@ def main(args=None): try: family = metadata.name except KeyError: - print(('ERROR: "{}" does not contain' - ' familyname info.').format(metadata_path), - file=sys.stderr) + print( + ('ERROR: "{}" does not contain' " familyname info.").format( + metadata_path + ), + file=sys.stderr, + ) continue try: index = webfontListFamilyNames.index(family) webfontsItem = webfontList[index] except ValueError: - print(('ERROR: Family "{}" could not be found' - ' in Google Web Fonts API.').format(family)) + print( + ( + 'ERROR: Family "{}" could not be found' " in Google Web Fonts API." + ).format(family) + ) continue webfontVariants = [] log_messages = [] - for variant, fonturl in webfontsItem['files'].items(): + for variant, fonturl in webfontsItem["files"].items(): cache_font_path = get_cache_font_path(args.cache, fonturl) webfontVariants.append(variant) if args.ignore_copy_existing_ttf and os.path.exists(cache_font_path): continue - with open(cache_font_path, 'w') as fp: + with open(cache_font_path, "w") as fp: found = False for font in metadata.fonts: if getVariantName(font) == variant: found = True if args.verbose: - print('Downloading "{}"' - ' as "{}"'.format(fonturl, - font.filename)) + print( + 'Downloading "{}"' + ' as "{}"'.format(fonturl, font.filename) + ) - #Saving: + # Saving: fp.write(requests.get(fonturl).text) - #Symlinking: + # Symlinking: src = cache_font_path dst_dir = os.path.dirname(cache_font_path) dst = os.path.join(dst_dir, font.filename) if not os.path.exists(dst): os.symlink(src, dst) if not found: - print(("ERROR: Google Fonts API references" - " a '{}' variant which is not declared" - " on local '{}'.").format(variant, - metadata_path)) - - for subset in webfontsItem['subsets']: + print( + ( + "ERROR: Google Fonts API references" + " a '{}' variant which is not declared" + " on local '{}'." + ).format(variant, metadata_path) + ) + + for subset in webfontsItem["subsets"]: if subset == "menu": # note about Google Web Fonts: # Menu subsets are no longer generated offline. continue if subset not in metadata.subsets: - print(('ERROR: "{}" ' - 'lacks subset "{}" in git.').format(family, subset), - file=sys.stderr) + print( + ('ERROR: "{}" ' 'lacks subset "{}" in git.').format(family, subset), + file=sys.stderr, + ) else: if args.verbose: - print(('OK: "{}" ' - 'subset "{}" in sync.').format(family, subset)) + print(('OK: "{}" ' 'subset "{}" in sync.').format(family, subset)) for subset in metadata.subsets: - if subset != "menu" and subset not in webfontsItem['subsets']: - print(('ERROR: "{}" ' - 'lacks subset "{}" in API.').format(family, subset), - file=sys.stderr) + if subset != "menu" and subset not in webfontsItem["subsets"]: + print( + ('ERROR: "{}" ' 'lacks subset "{}" in API.').format(family, subset), + file=sys.stderr, + ) if metadata.category == "SANS_SERIF": # That's fine :-) category = "sans-serif" else: category = metadata.category.lower() - if category != webfontsItem['category']: - print(('ERROR: "{}" category "{}" in git' - ' does not match category "{}"' - ' in API.').format(family, - metadata.category, - webfontsItem['category'])) + if category != webfontsItem["category"]: + print( + ( + 'ERROR: "{}" category "{}" in git' + ' does not match category "{}"' + " in API." 
+ ).format(family, metadata.category, webfontsItem["category"]) + ) else: if args.verbose: - print(('OK: "{}" ' - 'category "{}" in sync.').format(family, - metadata.category)) - + print( + ('OK: "{}" ' 'category "{}" in sync.').format( + family, metadata.category + ) + ) for variant in webfontVariants: try: idx = [getVariantName(f) for f in metadata.fonts].index(variant) repoFileName = metadata.fonts[idx].filename - fonturl = webfontsItem['files'][variant] + fonturl = webfontsItem["files"][variant] fontpath = get_cache_font_path(args.cache, fonturl) import hashlib - google_md5 = hashlib.md5(open(fontpath, 'rb').read()).hexdigest() - data = open(os.path.join(dirpath, repoFileName), 'rb').read() + + google_md5 = hashlib.md5(open(fontpath, "rb").read()).hexdigest() + data = open(os.path.join(dirpath, repoFileName), "rb").read() repo_md5 = hashlib.md5(data).hexdigest() if repo_md5 == google_md5: - log_messages.append([variant, - 'OK', - '"{}" in sync'.format(repoFileName)]) + log_messages.append( + [variant, "OK", '"{}" in sync'.format(repoFileName)] + ) else: - log_messages.append([variant, - 'ERROR', - ('"{}" checksum mismatch, file' - ' in API does not match file' - ' in git.').format(repoFileName)]) + log_messages.append( + [ + variant, + "ERROR", + ( + '"{}" checksum mismatch, file' + " in API does not match file" + " in git." + ).format(repoFileName), + ] + ) except ValueError: - log_messages.append([variant, - 'ERROR', - ('"{}" available in API but' - ' not in git.').format(font.filename)]) + log_messages.append( + [ + variant, + "ERROR", + ('"{}" available in API but' " not in git.").format( + font.filename + ), + ] + ) for font in metadata.fonts: variant = getVariantName(font) try: webfontVariants.index(variant) except ValueError: - log_messages.append([variant, - 'ERROR', - ('"{}" available in git but' - ' not in API.').format(font.filename)]) + log_messages.append( + [ + variant, + "ERROR", + ('"{}" available in git but' " not in API.").format( + font.filename + ), + ] + ) # Sort all the messages by their respective # metadataFileName and print them: @@ -230,10 +269,10 @@ def main(args=None): variant, status, text = message if status == "OK": if args.verbose: - print('{}: {}'.format(status, text)) + print("{}: {}".format(status, text)) else: - print('{}: {}'.format(status, text), file=sys.stderr) + print("{}: {}".format(status, text), file=sys.stderr) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/nametable_from_filename.py b/Lib/gftools/scripts/nametable_from_filename.py index fdebfe5a..2a7fb640 100755 --- a/Lib/gftools/scripts/nametable_from_filename.py +++ b/Lib/gftools/scripts/nametable_from_filename.py @@ -25,125 +25,119 @@ from __future__ import print_function import re import ntpath -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter from fontTools.ttLib import TTFont, newTable WIN_SAFE_STYLES = [ - 'Regular', - 'Bold', - 'Italic', - 'BoldItalic', + "Regular", + "Bold", + "Italic", + "BoldItalic", ] -MACSTYLE = { - 'Regular': 0, - 'Bold': 1, - 'Italic': 2, - 'Bold Italic': 3 -} +MACSTYLE = {"Regular": 0, "Bold": 1, "Italic": 2, "Bold Italic": 3} # Weight name to value mapping: WEIGHTS = { - "Thin": 250, - "ExtraLight": 275, - "Light": 300, - "Regular": 400, - "Italic": 400, - "Medium": 500, - "SemiBold": 600, - "Bold": 700, - "ExtraBold": 800, - "Black": 900 + "Thin": 250, + "ExtraLight": 275, + "Light": 300, + "Regular": 400, + "Italic": 400, + 
"Medium": 500, + "SemiBold": 600, + "Bold": 700, + "ExtraBold": 800, + "Black": 900, } REQUIRED_FIELDS = [ - (0, 1, 0, 0), - (1, 1, 0, 0), - (2, 1, 0, 0), - (3, 1, 0, 0), - (4, 1, 0, 0), - (5, 1, 0, 0), - (6, 1, 0, 0), - (7, 1, 0, 0), - (8, 1, 0, 0), - (9, 1, 0, 0), - (11, 1, 0, 0), - (12, 1, 0, 0), - (13, 1, 0, 0), - (14, 1, 0, 0), - (0, 3, 1, 1033), - (1, 3, 1, 1033), - (1, 3, 1, 1033), - (2, 3, 1, 1033), - (3, 3, 1, 1033), - (4, 3, 1, 1033), - (5, 3, 1, 1033), - (6, 3, 1, 1033), - (7, 3, 1, 1033), - (8, 3, 1, 1033), - (9, 3, 1, 1033), - (11, 3, 1, 1033), - (12, 3, 1, 1033), - (13, 3, 1, 1033), - (14, 3, 1, 1033), + (0, 1, 0, 0), + (1, 1, 0, 0), + (2, 1, 0, 0), + (3, 1, 0, 0), + (4, 1, 0, 0), + (5, 1, 0, 0), + (6, 1, 0, 0), + (7, 1, 0, 0), + (8, 1, 0, 0), + (9, 1, 0, 0), + (11, 1, 0, 0), + (12, 1, 0, 0), + (13, 1, 0, 0), + (14, 1, 0, 0), + (0, 3, 1, 1033), + (1, 3, 1, 1033), + (1, 3, 1, 1033), + (2, 3, 1, 1033), + (3, 3, 1, 1033), + (4, 3, 1, 1033), + (5, 3, 1, 1033), + (6, 3, 1, 1033), + (7, 3, 1, 1033), + (8, 3, 1, 1033), + (9, 3, 1, 1033), + (11, 3, 1, 1033), + (12, 3, 1, 1033), + (13, 3, 1, 1033), + (14, 3, 1, 1033), ] def _split_camelcase(text): - return re.sub(r"(?<=\w)([A-Z])", r" \1", text) + return re.sub(r"(?<=\w)([A-Z])", r" \1", text) def _mac_subfamily_name(style_name): - if style_name.startswith('Italic'): - pass - elif 'Italic' in style_name: - style_name = style_name.replace('Italic', ' Italic') - return style_name + if style_name.startswith("Italic"): + pass + elif "Italic" in style_name: + style_name = style_name.replace("Italic", " Italic") + return style_name def _unique_id(version, vendor_id, filename): - # Glyphsapp style 2.000;MYFO;Arsenal-Bold - # version;vendorID;filename - return '%s;%s;%s' % (version, vendor_id, filename) + # Glyphsapp style 2.000;MYFO;Arsenal-Bold + # version;vendorID;filename + return "%s;%s;%s" % (version, vendor_id, filename) def _version(text): - return re.search(r'[0-9]{1,4}\.[0-9]{1,8}', text).group(0) + return re.search(r"[0-9]{1,4}\.[0-9]{1,8}", text).group(0) def _full_name(family_name, style_name): - style_name = _mac_subfamily_name(style_name) - full_name = '%s %s' % (family_name, style_name) - return full_name + style_name = _mac_subfamily_name(style_name) + full_name = "%s %s" % (family_name, style_name) + return full_name def _win_family_name(family_name, style_name): - name = family_name - if style_name not in WIN_SAFE_STYLES: - name = '%s %s' % (family_name, style_name) - if 'Italic' in name: - name = re.sub(r'Italic', r'', name) - return name + name = family_name + if style_name not in WIN_SAFE_STYLES: + name = "%s %s" % (family_name, style_name) + if "Italic" in name: + name = re.sub(r"Italic", r"", name) + return name def _win_subfamily_name(style_name): - name = style_name - if 'BoldItalic' == name: - return 'Bold Italic' - elif 'Italic' in name: - return 'Italic' - elif name == 'Bold': - return 'Bold' - else: - return 'Regular' + name = style_name + if "BoldItalic" == name: + return "Bold Italic" + elif "Italic" in name: + return "Italic" + elif name == "Bold": + return "Bold" + else: + return "Regular" def set_usWeightClass(style_name): - name = style_name - if name != 'Italic': - name = re.sub(r'Italic', r'', style_name) - return WEIGHTS[name] + name = style_name + if name != "Italic": + name = re.sub(r"Italic", r"", style_name) + return WEIGHTS[name] def set_macStyle(style_name): @@ -151,140 +145,139 @@ def set_macStyle(style_name): def set_fsSelection(fsSelection, style): - bits = fsSelection - if 'Regular' in style: - 
bits |= 0b1000000 - else: - bits &= ~0b1000000 + bits = fsSelection + if "Regular" in style: + bits |= 0b1000000 + else: + bits &= ~0b1000000 - if style in ['Bold', 'BoldItalic']: - bits |= 0b100000 - else: - bits &= ~0b100000 + if style in ["Bold", "BoldItalic"]: + bits |= 0b100000 + else: + bits &= ~0b100000 - if 'Italic' in style: - bits |= 0b1 - else: - bits &= ~0b1 + if "Italic" in style: + bits |= 0b1 + else: + bits &= ~0b1 - if not bits: - bits = 0b1000000 + if not bits: + bits = 0b1000000 - return bits + return bits def nametable_from_filename(filepath): - """Generate a new nametable based on a ttf and the GF Spec""" - font = TTFont(filepath) - old_table = font['name'] - new_table = newTable('name') - filename = ntpath.basename(filepath)[:-4] - - family_name, style_name = filename.split('-') - family_name = _split_camelcase(family_name) - - font_version = font['name'].getName(5, 3, 1, 1033) - font_version = font_version.toUnicode() - vendor_id = font['OS/2'].achVendID - - # SET MAC NAME FIELDS - # ------------------- - # Copyright - old_cp = old_table.getName(0, 3, 1, 1033).string.decode('utf_16_be') - new_table.setName(old_cp.encode('mac_roman'), 0, 1, 0, 0) - # Font Family Name - new_table.setName(family_name.encode('mac_roman'), 1, 1, 0, 0) - # Subfamily name - mac_subfamily_name = _mac_subfamily_name(style_name).encode('mac_roman') - new_table.setName(mac_subfamily_name, 2, 1, 0, 0) - # Unique ID - unique_id = _unique_id(_version(font_version), vendor_id, filename) - mac_unique_id = unique_id.encode('mac_roman') - new_table.setName(mac_unique_id, 3, 1, 0, 0) - # Full name - fullname = _full_name(family_name, style_name) - mac_fullname = fullname.encode('mac_roman') - new_table.setName(mac_fullname, 4, 1, 0, 0) - # Version string - old_v = old_table.getName(5, 3, 1, 1033).string.decode('utf_16_be') - mac_old_v = old_v.encode('mac_roman') - new_table.setName(mac_old_v, 5, 1, 0, 0) - # Postscript name - mac_ps_name = filename.encode('mac_roman') - new_table.setName(mac_ps_name, 6, 1, 0, 0) - - # SET WIN NAME FIELDS - # ------------------- - # Copyright - new_table.setName(old_cp, 0, 3, 1, 1033) - # Font Family Name - win_family_name = _win_family_name(family_name, style_name) - win_family_name = win_family_name.encode('utf_16_be') - new_table.setName(win_family_name, 1, 3, 1, 1033) - # Subfamily Name - win_subfamily_name = _win_subfamily_name(style_name).encode('utf_16_be') - new_table.setName(win_subfamily_name, 2, 3, 1, 1033) - # Unique ID - win_unique_id = unique_id.encode('utf_16_be') - new_table.setName(win_unique_id, 3, 3, 1, 1033) - # Full name - win_fullname = fullname.encode('utf_16_be') - new_table.setName(win_fullname, 4, 3, 1, 1033) - # Version string - win_old_v = old_v.encode('utf_16_be') - new_table.setName(win_old_v, 5, 3, 1, 1033) - # Postscript name - win_ps_name = filename.encode('utf_16_be') - new_table.setName(win_ps_name, 6, 3, 1, 1033) - - if style_name not in WIN_SAFE_STYLES: - # Preferred Family Name - new_table.setName(family_name.encode('utf_16_be'), 16, 3, 1, 1033) - # Preferred SubfamilyName - win_pref_subfam_name = _mac_subfamily_name(style_name).encode('utf_16_be') - new_table.setName(win_pref_subfam_name, 17, 3, 1, 1033) - - # PAD missing fields - # ------------------ - for field in REQUIRED_FIELDS: - text = None - if new_table.getName(*field): - pass # Name has already been updated - elif old_table.getName(*field): - text = old_table.getName(*field).string - elif old_table.getName(field[0], 3, 1, 1033): - text = old_table.getName(field[0], 3, 1, 
1033).string.decode('utf_16_be') - elif old_table.getName(field[0], 1, 0, 0): # check if field exists for mac - text = old_table.getName(field[0], 3, 1, 1033).string.decode('mac_roman') - - if text: - new_table.setName(text, *field) - return new_table - - -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fonts', nargs="+") + """Generate a new nametable based on a ttf and the GF Spec""" + font = TTFont(filepath) + old_table = font["name"] + new_table = newTable("name") + filename = ntpath.basename(filepath)[:-4] + + family_name, style_name = filename.split("-") + family_name = _split_camelcase(family_name) + + font_version = font["name"].getName(5, 3, 1, 1033) + font_version = font_version.toUnicode() + vendor_id = font["OS/2"].achVendID + + # SET MAC NAME FIELDS + # ------------------- + # Copyright + old_cp = old_table.getName(0, 3, 1, 1033).string.decode("utf_16_be") + new_table.setName(old_cp.encode("mac_roman"), 0, 1, 0, 0) + # Font Family Name + new_table.setName(family_name.encode("mac_roman"), 1, 1, 0, 0) + # Subfamily name + mac_subfamily_name = _mac_subfamily_name(style_name).encode("mac_roman") + new_table.setName(mac_subfamily_name, 2, 1, 0, 0) + # Unique ID + unique_id = _unique_id(_version(font_version), vendor_id, filename) + mac_unique_id = unique_id.encode("mac_roman") + new_table.setName(mac_unique_id, 3, 1, 0, 0) + # Full name + fullname = _full_name(family_name, style_name) + mac_fullname = fullname.encode("mac_roman") + new_table.setName(mac_fullname, 4, 1, 0, 0) + # Version string + old_v = old_table.getName(5, 3, 1, 1033).string.decode("utf_16_be") + mac_old_v = old_v.encode("mac_roman") + new_table.setName(mac_old_v, 5, 1, 0, 0) + # Postscript name + mac_ps_name = filename.encode("mac_roman") + new_table.setName(mac_ps_name, 6, 1, 0, 0) + + # SET WIN NAME FIELDS + # ------------------- + # Copyright + new_table.setName(old_cp, 0, 3, 1, 1033) + # Font Family Name + win_family_name = _win_family_name(family_name, style_name) + win_family_name = win_family_name.encode("utf_16_be") + new_table.setName(win_family_name, 1, 3, 1, 1033) + # Subfamily Name + win_subfamily_name = _win_subfamily_name(style_name).encode("utf_16_be") + new_table.setName(win_subfamily_name, 2, 3, 1, 1033) + # Unique ID + win_unique_id = unique_id.encode("utf_16_be") + new_table.setName(win_unique_id, 3, 3, 1, 1033) + # Full name + win_fullname = fullname.encode("utf_16_be") + new_table.setName(win_fullname, 4, 3, 1, 1033) + # Version string + win_old_v = old_v.encode("utf_16_be") + new_table.setName(win_old_v, 5, 3, 1, 1033) + # Postscript name + win_ps_name = filename.encode("utf_16_be") + new_table.setName(win_ps_name, 6, 3, 1, 1033) + + if style_name not in WIN_SAFE_STYLES: + # Preferred Family Name + new_table.setName(family_name.encode("utf_16_be"), 16, 3, 1, 1033) + # Preferred SubfamilyName + win_pref_subfam_name = _mac_subfamily_name(style_name).encode("utf_16_be") + new_table.setName(win_pref_subfam_name, 17, 3, 1, 1033) + + # PAD missing fields + # ------------------ + for field in REQUIRED_FIELDS: + text = None + if new_table.getName(*field): + pass # Name has already been updated + elif old_table.getName(*field): + text = old_table.getName(*field).string + elif old_table.getName(field[0], 3, 1, 1033): + text = old_table.getName(field[0], 3, 1, 1033).string.decode("utf_16_be") + elif old_table.getName(field[0], 1, 0, 0): # check if field exists for mac + text = old_table.getName(field[0], 3, 1, 1033).string.decode("mac_roman") + 
+ if text: + new_table.setName(text, *field) + return new_table + + +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fonts", nargs="+") def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - for font_path in args.fonts: - nametable = nametable_from_filename(font_path) - font = TTFont(font_path) - font_filename = ntpath.basename(font_path) + for font_path in args.fonts: + nametable = nametable_from_filename(font_path) + font = TTFont(font_path) + font_filename = ntpath.basename(font_path) - font['name'] = nametable - style = font_filename[:-4].split('-')[-1] - font['OS/2'].usWeightClass = set_usWeightClass(style) - font['OS/2'].fsSelection = set_fsSelection(font['OS/2'].fsSelection, style) - win_style = font['name'].getName(2, 3, 1, 1033).string.decode('utf_16_be') - font['head'].macStyle = set_macStyle(win_style) + font["name"] = nametable + style = font_filename[:-4].split("-")[-1] + font["OS/2"].usWeightClass = set_usWeightClass(style) + font["OS/2"].fsSelection = set_fsSelection(font["OS/2"].fsSelection, style) + win_style = font["name"].getName(2, 3, 1, 1033).string.decode("utf_16_be") + font["head"].macStyle = set_macStyle(win_style) - font.save(font_path + '.fix') - print('font saved %s.fix' % font_path) + font.save(font_path + ".fix") + print("font saved %s.fix" % font_path) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/ots.py b/Lib/gftools/scripts/ots.py index bdcbb21d..3536f65c 100755 --- a/Lib/gftools/scripts/ots.py +++ b/Lib/gftools/scripts/ots.py @@ -20,33 +20,35 @@ import sys import os + def main(args=None): parser = argparse.ArgumentParser( - description='Run ots-sanitizer on all fonts in the directory') - parser.add_argument('path') + description="Run ots-sanitizer on all fonts in the directory" + ) + parser.add_argument("path") args = parser.parse_args(args) results = [] for p, i, files in os.walk(args.path): for f in files: - if f.endswith('.ttf'): + if f.endswith(".ttf"): try: font = os.path.join(p, f) process = ots.sanitize(font, check=True, capture_output=True) - result = '%s\t%s' % (font, process.stdout) + result = "%s\t%s" % (font, process.stdout) except ots.CalledProcessError as e: - result = '%s\t%s' % (font, e.output) + result = "%s\t%s" % (font, e.output) results.append(result) - print('%s\t%s' % (f, result)) + print("%s\t%s" % (f, result)) - with open('ots_gf_results.txt', 'w') as doc: - doc.write(''.join(results)) - print('done!') + with open("ots_gf_results.txt", "w") as doc: + doc.write("".join(results)) + print("done!") -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) != 2: - print('ERROR: Include path to OFL dir') + print("ERROR: Include path to OFL dir") else: main(sys.argv[-1]) diff --git a/Lib/gftools/scripts/push_stats.py b/Lib/gftools/scripts/push_stats.py index be9586ca..821b5a19 100755 --- a/Lib/gftools/scripts/push_stats.py +++ b/Lib/gftools/scripts/push_stats.py @@ -109,18 +109,14 @@ def main(args=None): "pushes": { "sandbox": [i.to_json() for i in sb_families], "production": [i.to_json() for i in prod_families], - } + }, } json.dump(commit_data, open(data_out, "w", encoding="utf8"), indent=4) print("Writing report") with open(args.out, "w") as doc: - doc.write( - template.render( - commit_data=json.dumps(commit_data) - ) - ) + doc.write(template.render(commit_data=json.dumps(commit_data))) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git 
a/Lib/gftools/scripts/push_status.py b/Lib/gftools/scripts/push_status.py index a8646ca8..4d26d6ba 100755 --- a/Lib/gftools/scripts/push_status.py +++ b/Lib/gftools/scripts/push_status.py @@ -32,7 +32,11 @@ import argparse from pathlib import Path from gftools.push.trafficjam import PushItems, PushStatus -from gftools.push.servers import gf_server_metadata, PRODUCTION_META_URL, SANDBOX_META_URL +from gftools.push.servers import ( + gf_server_metadata, + PRODUCTION_META_URL, + SANDBOX_META_URL, +) from gftools.push.items import Family import os @@ -75,7 +79,11 @@ def lint_server_files(fp: Path): def server_push_status(fp: Path, url: str): - families = [i for i in PushItems.from_server_file(fp, None, None) if isinstance(i.item(), Family)] + families = [ + i + for i in PushItems.from_server_file(fp, None, None) + if isinstance(i.item(), Family) + ] family_names = [i.item().name for i in families] gf_meta = gf_server_metadata(url) @@ -105,8 +113,6 @@ def push_report(fp: Path): server_push_report("Sandbox", sandbox_path, SANDBOX_META_URL) - - def main(args=None): parser = argparse.ArgumentParser() parser.add_argument("path", type=Path, help="Path to google/fonts repo") diff --git a/Lib/gftools/scripts/qa.py b/Lib/gftools/scripts/qa.py index 9f43aaee..13c4834c 100755 --- a/Lib/gftools/scripts/qa.py +++ b/Lib/gftools/scripts/qa.py @@ -113,7 +113,9 @@ def main(args=None): check_group.add_argument( "--diffbrowsers", action="store_true", help="Run Diffbrowsers" ) - check_group.add_argument("--interpolations", action="store_true", help="Run interpolation checker") + check_group.add_argument( + "--interpolations", action="store_true", help="Run interpolation checker" + ) parser.add_argument("-re", "--filter-fonts", help="Filter fonts by regex") parser.add_argument( "-o", "--out", default="out", help="Output path for check results" diff --git a/Lib/gftools/scripts/rangify.py b/Lib/gftools/scripts/rangify.py index 6b8e0327..9a608372 100755 --- a/Lib/gftools/scripts/rangify.py +++ b/Lib/gftools/scripts/rangify.py @@ -1,13 +1,13 @@ #!/usr/bin/python # # Copyright 2014 Google Inc. All rights reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -32,27 +32,29 @@ def get_codepoints(cps): def main(args=None): - parser = argparse.ArgumentParser( - description='Converts a .nam file to a list of ranges') - parser.add_argument('nam') - args = parser.parse_args(args) - codepoints_data = list(tokenize.tokenize(open(args.nam, 'rb').readline)) - codepoints = get_codepoints(codepoints_data) - codepoints.sort() - - seqs = [] - seq = (None,) - for cp in codepoints: - if seq[0] is None: - seq = (cp,cp) - elif seq[1] == cp - 1: - seq = (seq[0], cp) - else: - seqs.append(seq) - seq = (None,) - - for seq in seqs: - print(seq) - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description="Converts a .nam file to a list of ranges" + ) + parser.add_argument("nam") + args = parser.parse_args(args) + codepoints_data = list(tokenize.tokenize(open(args.nam, "rb").readline)) + codepoints = get_codepoints(codepoints_data) + codepoints.sort() + + seqs = [] + seq = (None,) + for cp in codepoints: + if seq[0] is None: + seq = (cp, cp) + elif seq[1] == cp - 1: + seq = (seq[0], cp) + else: + seqs.append(seq) + seq = (None,) + + for seq in seqs: + print(seq) + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/remap_font.py b/Lib/gftools/scripts/remap_font.py index 4cd617fd..d77a0224 100644 --- a/Lib/gftools/scripts/remap_font.py +++ b/Lib/gftools/scripts/remap_font.py @@ -15,14 +15,15 @@ from fontTools.ttLib import TTFont + def grovel_substitutions(font, lookup, glyphmap): if lookup.LookupType == 7: raise NotImplementedError - gmap = lambda g: glyphmap.get(g,g) + gmap = lambda g: glyphmap.get(g, g) go = font.getGlyphOrder() def do_coverage(c): - c.glyphs = list(sorted([gmap(g) for g in c.glyphs], key=lambda g:go.index(g))) + c.glyphs = list(sorted([gmap(g) for g in c.glyphs], key=lambda g: go.index(g))) return c for st in lookup.SubTable: @@ -52,7 +53,9 @@ def do_coverage(c): subrule.Input = [gmap(c) for c in subrule.Input] elif st.Format == 2: do_coverage(st.Coverage) - st.ClassDef.classDefs = {gmap(k):v for k,v in st.ClassDef.classDefs.items()} + st.ClassDef.classDefs = { + gmap(k): v for k, v in st.ClassDef.classDefs.items() + } else: st.Coverage = [do_coverage(c) for c in st.Coverage] elif lookup.LookupType == 6: @@ -65,20 +68,28 @@ def do_coverage(c): subrule.LookAhead = [gmap(c) for c in subrule.LookAhead] elif st.Format == 2: do_coverage(st.Coverage) - st.BacktrackClassDef.classDefs = {gmap(k):v for k,v in st.BacktrackClassDef.classDefs.items()} - st.InputClassDef.classDefs = {gmap(k):v for k,v in st.InputClassDef.classDefs.items()} - st.LookAheadClassDef.classDefs = {gmap(k):v for k,v in st.LookAheadClassDef.classDefs.items()} + st.BacktrackClassDef.classDefs = { + gmap(k): v for k, v in st.BacktrackClassDef.classDefs.items() + } + st.InputClassDef.classDefs = { + gmap(k): v for k, v in st.InputClassDef.classDefs.items() + } + st.LookAheadClassDef.classDefs = { + gmap(k): v for k, v in st.LookAheadClassDef.classDefs.items() + } elif st.Format == 3: - st.BacktrackCoverage = [ do_coverage(c) for c in st.BacktrackCoverage] - st.InputCoverage = [ do_coverage(c) for c in st.InputCoverage] - st.LookAheadCoverage = [ do_coverage(c) for c in st.LookAheadCoverage] + st.BacktrackCoverage = [do_coverage(c) for c in st.BacktrackCoverage] + st.InputCoverage = [do_coverage(c) for c in st.InputCoverage] + st.LookAheadCoverage = [do_coverage(c) for c in st.LookAheadCoverage] def main(args=None): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--map-file", metavar="TXT", 
help="Newline-separated mappings") parser.add_argument("--output", "-o", metavar="TTF", help="Output font binary") - parser.add_argument("--deep", action="store_true", help="Also remap inside GSUB table") + parser.add_argument( + "--deep", action="store_true", help="Also remap inside GSUB table" + ) parser.add_argument("font", metavar="TTF", help="Input font binary") parser.add_argument("mapping", nargs="*", help="Codepoint-to-glyph mapping") @@ -113,7 +124,9 @@ def main(args=None): codepoint = ord(codepoint) mapping[codepoint] = newglyph if newglyph not in font.getGlyphOrder(): - print(f"Glyph '{newglyph}' (to be mapped to U+{codepoint:04X}) not found in font") + print( + f"Glyph '{newglyph}' (to be mapped to U+{codepoint:04X}) not found in font" + ) sys.exit(1) if codepoint in cmap: glyph_mapping[cmap[codepoint]] = newglyph diff --git a/Lib/gftools/scripts/rename_font.py b/Lib/gftools/scripts/rename_font.py index 2597eae6..a6ed8903 100755 --- a/Lib/gftools/scripts/rename_font.py +++ b/Lib/gftools/scripts/rename_font.py @@ -19,11 +19,14 @@ def main(args=None): parser.add_argument("font") parser.add_argument("new_name", help="New family name") parser.add_argument("-o", "--out", help="Output path") - parser.add_argument("--just-family", action="store_true", - help="Only change family name and names based off it, such as the " - "PostScript name. (By default, the old family name is replaced " - "by the new name in all name table entries, including copyright, " - "description, etc.)") + parser.add_argument( + "--just-family", + action="store_true", + help="Only change family name and names based off it, such as the " + "PostScript name. (By default, the old family name is replaced " + "by the new name in all name table entries, including copyright, " + "description, etc.)", + ) args = parser.parse_args(args) font = TTFont(args.font) diff --git a/Lib/gftools/scripts/sanity_check.py b/Lib/gftools/scripts/sanity_check.py index 303e5bee..200cb4cb 100755 --- a/Lib/gftools/scripts/sanity_check.py +++ b/Lib/gftools/scripts/sanity_check.py @@ -30,491 +30,536 @@ FLAGS = flags.FLAGS -flags.DEFINE_boolean('suppress_pass', True, 'Whether to print pass: results') -flags.DEFINE_boolean('check_metadata', True, 'Whether to check METADATA values') -flags.DEFINE_boolean('check_font', True, 'Whether to check font values') -flags.DEFINE_string('repair_script', None, 'Where to write a repair script') +flags.DEFINE_boolean("suppress_pass", True, "Whether to print pass: results") +flags.DEFINE_boolean("check_metadata", True, "Whether to check METADATA values") +flags.DEFINE_boolean("check_font", True, "Whether to check font values") +flags.DEFINE_string("repair_script", None, "Where to write a repair script") _FIX_TYPE_OPTS = [ - 'all', 'name', 'filename', 'postScriptName', 'fullName', 'fsSelection', - 'fsType', 'usWeightClass', 'emptyGlyphLSB' + "all", + "name", + "filename", + "postScriptName", + "fullName", + "fsSelection", + "fsType", + "usWeightClass", + "emptyGlyphLSB", ] -flags.DEFINE_multi_string('fix_type', 'all', - 'What types of problems should be fixed by ' - 'repair_script. ' - 'Choices: ' + ', '.join(_FIX_TYPE_OPTS)) +flags.DEFINE_multi_string( + "fix_type", + "all", + "What types of problems should be fixed by " + "repair_script. 
" + "Choices: " + ", ".join(_FIX_TYPE_OPTS), +) ResultMessageTuple = collections.namedtuple( - 'ResultMessageTuple', ['happy', 'message', 'path', 'repair_script']) + "ResultMessageTuple", ["happy", "message", "path", "repair_script"] +) def _HappyResult(message, path): - return ResultMessageTuple(True, message, path, None) + return ResultMessageTuple(True, message, path, None) def _SadResult(message, path, repair_script=None): - return ResultMessageTuple(False, message, path, repair_script) + return ResultMessageTuple(False, message, path, repair_script) def _DropEmptyPathSegments(path): - """Removes empty segments from the end of path. + """Removes empty segments from the end of path. - Args: - path: A filesystem path. - Returns: - path with trailing empty segments removed. Eg /duck/// => /duck. - """ - while True: - (head, tail) = os.path.split(path) - if tail: - break - path = head - return path + Args: + path: A filesystem path. + Returns: + path with trailing empty segments removed. Eg /duck/// => /duck. + """ + while True: + (head, tail) = os.path.split(path) + if tail: + break + path = head + return path def _SanityCheck(path): - """Runs various sanity checks on the font family under path. + """Runs various sanity checks on the font family under path. + + Args: + path: A directory containing a METADATA.pb file. + Returns: + A list of ResultMessageTuple's. + """ + try: + fonts.Metadata(path) + except ValueError as e: + return [_SadResult("Bad METADATA.pb: " + e.message, path)] + + results = [] + if FLAGS.check_metadata: + results.extend(_CheckLicense(path)) + results.extend(_CheckNameMatching(path)) + + if FLAGS.check_font: + results.extend(_CheckFontInternalValues(path)) - Args: - path: A directory containing a METADATA.pb file. - Returns: - A list of ResultMessageTuple's. - """ - try: - fonts.Metadata(path) - except ValueError as e: - return [_SadResult('Bad METADATA.pb: ' + e.message, path)] - - results = [] - if FLAGS.check_metadata: - results.extend(_CheckLicense(path)) - results.extend(_CheckNameMatching(path)) - - if FLAGS.check_font: - results.extend(_CheckFontInternalValues(path)) - - return results + return results def _CheckLicense(path): - """Verifies that METADATA.pb license is correct under path. + """Verifies that METADATA.pb license is correct under path. - Assumes path is of the form ...///METADATA.pb. + Assumes path is of the form ...///METADATA.pb. - Args: - path: A directory containing a METADATA.pb file. - Returns: - A list with one ResultMessageTuple. If happy, license is good. - """ - metadata = fonts.Metadata(path) - lic = metadata.license - lic_dir = os.path.basename(os.path.dirname(path)) + Args: + path: A directory containing a METADATA.pb file. + Returns: + A list with one ResultMessageTuple. If happy, license is good. 
+ """ + metadata = fonts.Metadata(path) + lic = metadata.license + lic_dir = os.path.basename(os.path.dirname(path)) - # We use /apache for the license Apache2 - if lic_dir == 'apache': - lic_dir += '2' + # We use /apache for the license Apache2 + if lic_dir == "apache": + lic_dir += "2" - result = _HappyResult('License consistantly %s' % lic, path) - # if we were Python 3 we'd use casefold(); this will suffice - if lic_dir.lower() != lic.lower(): - result = _SadResult('Dir license != METADATA license: %s != %s' % - (lic_dir, lic), path) + result = _HappyResult("License consistantly %s" % lic, path) + # if we were Python 3 we'd use casefold(); this will suffice + if lic_dir.lower() != lic.lower(): + result = _SadResult( + "Dir license != METADATA license: %s != %s" % (lic_dir, lic), path + ) - return [result] + return [result] def _CheckNameMatching(path): - """Verifies the various name fields in the METADATA.pb file are sane. - - Args: - path: A directory containing a METADATA.pb file. - Returns: - A list of ResultMessageTuple, one per validation performed. - """ - results = [] - metadata = fonts.Metadata(path) - name = metadata.name - - for font in metadata.fonts: - # We assume style/weight is correct in METADATA - style = font.style - weight = font.weight - values = [('name', name, font.name), ('filename', fonts.FilenameFor( - name, style, weight, '.ttf'), font.filename), - ('postScriptName', fonts.FilenameFor(name, style, weight), - font.post_script_name), ('fullName', fonts.FullnameFor( - name, style, weight), font.full_name)] - - for (key, expected, actual) in values: - if expected != actual: + """Verifies the various name fields in the METADATA.pb file are sane. + + Args: + path: A directory containing a METADATA.pb file. + Returns: + A list of ResultMessageTuple, one per validation performed. + """ + results = [] + metadata = fonts.Metadata(path) + name = metadata.name + + for font in metadata.fonts: + # We assume style/weight is correct in METADATA + style = font.style + weight = font.weight + values = [ + ("name", name, font.name), + ("filename", fonts.FilenameFor(name, style, weight, ".ttf"), font.filename), + ( + "postScriptName", + fonts.FilenameFor(name, style, weight), + font.post_script_name, + ), + ("fullName", fonts.FullnameFor(name, style, weight), font.full_name), + ] + + for key, expected, actual in values: + if expected != actual: + results.append( + _SadResult( + "%s METADATA %s/%d %s expected %s, got %s" + % (name, style, weight, key, expected, actual), + path, + _FixMetadata(style, weight, key, expected), + ) + ) + + if not results: results.append( - _SadResult('%s METADATA %s/%d %s expected %s, got %s' % - (name, style, weight, key, expected, actual), path, - _FixMetadata(style, weight, key, expected))) - - if not results: - results.append( - _HappyResult('METADATA name consistently derived from "%s"' % name, - path)) + _HappyResult('METADATA name consistently derived from "%s"' % name, path) + ) - return results + return results def _IsItalic(style): - return style.lower() == 'italic' + return style.lower() == "italic" def _IsBold(weight): - """Is this weight considered bold? + """Is this weight considered bold? - Per Dave C, only 700 will be considered bold. + Per Dave C, only 700 will be considered bold. - Args: - weight: Font weight. - Returns: - True if weight is considered bold, otherwise False. - """ - return weight == 700 + Args: + weight: Font weight. + Returns: + True if weight is considered bold, otherwise False. 
+ """ + return weight == 700 def _ShouldFix(key): - return FLAGS.fix_type and (key in FLAGS.fix_type or 'all' in FLAGS.fix_type) + return FLAGS.fix_type and (key in FLAGS.fix_type or "all" in FLAGS.fix_type) def _FixMetadata(style, weight, key, expected): - if not _ShouldFix(key): - return None + if not _ShouldFix(key): + return None - if not isinstance(expected, int): - expected = '\'%s\'' % expected + if not isinstance(expected, int): + expected = "'%s'" % expected - return ('[f for f in metadata.fonts if f.style == \'%s\' ' - 'and f.weight == %d][0].%s = %s') % ( - style, weight, re.sub('([a-z])([A-Z])', r'\1_\2', key).lower(), - expected) + return ( + "[f for f in metadata.fonts if f.style == '%s' " + "and f.weight == %d][0].%s = %s" + ) % (style, weight, re.sub("([a-z])([A-Z])", r"\1_\2", key).lower(), expected) def _FixFsSelectionBit(key, expected): - """Write a repair script to fix a bad fsSelection bit. + """Write a repair script to fix a bad fsSelection bit. - Args: - key: The name of an fsSelection flag, eg 'ITALIC' or 'BOLD'. - expected: Expected value, true/false, of the flag. - Returns: - A python script to fix the problem. - """ - if not _ShouldFix('fsSelection'): - return None + Args: + key: The name of an fsSelection flag, eg 'ITALIC' or 'BOLD'. + expected: Expected value, true/false, of the flag. + Returns: + A python script to fix the problem. + """ + if not _ShouldFix("fsSelection"): + return None - op = '|=' - verb = 'set' - mask = bin(fonts.FsSelectionMask(key)) - if not expected: - op = '&=' - verb = 'unset' - mask = '~' + mask + op = "|=" + verb = "set" + mask = bin(fonts.FsSelectionMask(key)) + if not expected: + op = "&=" + verb = "unset" + mask = "~" + mask - return 'ttf[\'OS/2\'].fsSelection %s %s # %s %s' % (op, mask, verb, key) + return "ttf['OS/2'].fsSelection %s %s # %s %s" % (op, mask, verb, key) def _FixFsType(expected): - if not _ShouldFix('fsType'): - return None - return 'ttf[\'OS/2\'].fsType = %d' % expected + if not _ShouldFix("fsType"): + return None + return "ttf['OS/2'].fsType = %d" % expected def _FixWeightClass(expected): - if not _ShouldFix('usWeightClass'): - return None - return 'ttf[\'OS/2\'].usWeightClass = %d' % expected + if not _ShouldFix("usWeightClass"): + return None + return "ttf['OS/2'].usWeightClass = %d" % expected def _FixBadNameRecord(friendly_name, name_id, expected): - if not _ShouldFix(friendly_name): - return None + if not _ShouldFix(friendly_name): + return None - return ('for nr in [n for n in ttf[\'name\'].names if n.nameID == %d]:\n' - ' nr.string = \'%s\'.encode(nr.getEncoding()) # Fix %s' % - (name_id, expected, friendly_name)) + return ( + "for nr in [n for n in ttf['name'].names if n.nameID == %d]:\n" + " nr.string = '%s'.encode(nr.getEncoding()) # Fix %s" + % (name_id, expected, friendly_name) + ) def _FixMissingNameRecord(friendly_name, name_id, expected): - if not _ShouldFix(friendly_name): - return None + if not _ShouldFix(friendly_name): + return None - return ('nr = ttLib.tables._n_a_m_e.NameRecord()\n' - 'nr.nameID = %d # %s' - 'nr.langID = 0x409\n' - 'nr.platEncID = 1\n' - 'nr.platformID = 3\n' - 'nr.string = \'%s\'.encode(nr.getEncoding())\n' - 'ttf[\'name\'].names.append(nr)\n' % (name_id, friendly_name, - expected)) + return ( + "nr = ttLib.tables._n_a_m_e.NameRecord()\n" + "nr.nameID = %d # %s" + "nr.langID = 0x409\n" + "nr.platEncID = 1\n" + "nr.platformID = 3\n" + "nr.string = '%s'.encode(nr.getEncoding())\n" + "ttf['name'].names.append(nr)\n" % (name_id, friendly_name, expected) + ) def 
_FixEmptyGlyphLsb(glyph_name): - if not _ShouldFix('emptyGlyphLSB'): - return None + if not _ShouldFix("emptyGlyphLSB"): + return None - return 'ttf[\'hmtx\'][\'%s\'] = [ttf[\'hmtx\'][\'%s\'][0], 0]\n' % ( - glyph_name, glyph_name) + return "ttf['hmtx']['%s'] = [ttf['hmtx']['%s'][0], 0]\n" % (glyph_name, glyph_name) def _CheckFontOS2Values(path, font, ttf): - """Check sanity of values hidden in the 'OS/2' table. - - Notably usWeightClass, fsType, fsSelection. - - Args: - path: Path to directory containing font. - font: A font record from a METADATA.pb. - ttf: A fontTools.ttLib.TTFont for the font. - Returns: - A list of ResultMessageTuple for tests performed. - """ - results = [] - - font_file = font.filename - full_font_file = os.path.join(path, font_file) - expected_style = font.style - expected_weight = font.weight - - os2 = ttf['OS/2'] - fs_selection_flags = fonts.FsSelectionFlags(os2.fsSelection) - actual_weight = os2.usWeightClass - fs_type = os2.fsType - - marked_oblique = 'OBLIQUE' in fs_selection_flags - marked_italic = 'ITALIC' in fs_selection_flags - marked_bold = 'BOLD' in fs_selection_flags - - expect_italic = _IsItalic(expected_style) - expect_bold = _IsBold(expected_weight) - # Per Dave C, we should NEVER set oblique, use 0 for italic - expect_oblique = False - - results.append( - ResultMessageTuple(marked_italic == expect_italic, - '%s %s/%d fsSelection marked_italic %d' % - (font_file, expected_style, expected_weight, - marked_italic), full_font_file, - _FixFsSelectionBit('ITALIC', expect_italic))) - results.append( - ResultMessageTuple(marked_bold == expect_bold, - '%s %s/%d fsSelection marked_bold %d' % - (font_file, expected_style, expected_weight, - marked_bold), full_font_file, - _FixFsSelectionBit('BOLD', expect_bold))) - - results.append( - ResultMessageTuple(marked_oblique == expect_oblique, - '%s %s/%d fsSelection marked_oblique %d' % - (font_file, expected_style, expected_weight, - marked_oblique), full_font_file, - _FixFsSelectionBit('OBLIQUE', expect_oblique))) - - # For weight < 300, just confirm weight [250, 300) - # TODO(user): we should also verify ordering is correct - weight_ok = expected_weight == actual_weight - weight_msg = str(expected_weight) - if expected_weight < 300: - weight_ok = actual_weight >= 250 and actual_weight < 300 - weight_msg = '[250, 300)' - - results.append( - ResultMessageTuple(weight_ok, - '%s %s/%d weight expected: %s usWeightClass: %d' % - (font_file, expected_style, expected_weight, - weight_msg, actual_weight), full_font_file, - _FixWeightClass(expected_weight))) - - expected_fs_type = 0 - results.append( - ResultMessageTuple(expected_fs_type == fs_type, - '%s %s/%d fsType expected: %d fsType: %d' % - (font_file, expected_style, expected_weight, - expected_fs_type, fs_type), full_font_file, - _FixFsType(expected_fs_type))) - - return results + """Check sanity of values hidden in the 'OS/2' table. + Notably usWeightClass, fsType, fsSelection. -def _CheckFontNameValues(path, name, font, ttf): - """Check sanity of values in the 'name' table. - - Specifically the fullname and postScriptName. - - Args: - path: Path to directory containing font. - name: The name of the family. - font: A font record from a METADATA.pb. - ttf: A fontTools.ttLib.TTFont for the font. - Returns: - A list of ResultMessageTuple for tests performed. 
- """ - results = [] - - style = font.style - weight = font.weight - full_font_file = os.path.join(path, font.filename) - - expectations = [('family', fonts.NAME_FAMILY, name), - ('postScriptName', fonts.NAME_PSNAME, fonts.FilenameFor( - name, style, weight)), ('fullName', fonts.NAME_FULLNAME, - fonts.FullnameFor( - name, style, weight))] - - for (friendly_name, name_id, expected) in expectations: - # If you have lots of name records they should ALL have the right value - actuals = fonts.ExtractNames(ttf, name_id) - for (idx, actual) in enumerate(actuals): - results.append( - ResultMessageTuple(expected == actual, - '%s %s/%d \'name\' %s[%d] expected %s, got %s' % - (name, style, weight, friendly_name, idx, expected, - actual), full_font_file, - _FixBadNameRecord(friendly_name, name_id, - expected))) - - # should have at least one actual - if not actuals: - results.append( - _SadResult('%s %s/%d \'name\' %s has NO values' % - (name, style, weight, friendly_name), full_font_file, - _FixMissingNameRecord(friendly_name, name_id, expected))) - - return results + Args: + path: Path to directory containing font. + font: A font record from a METADATA.pb. + ttf: A fontTools.ttLib.TTFont for the font. + Returns: + A list of ResultMessageTuple for tests performed. + """ + results = [] + font_file = font.filename + full_font_file = os.path.join(path, font_file) + expected_style = font.style + expected_weight = font.weight + + os2 = ttf["OS/2"] + fs_selection_flags = fonts.FsSelectionFlags(os2.fsSelection) + actual_weight = os2.usWeightClass + fs_type = os2.fsType + + marked_oblique = "OBLIQUE" in fs_selection_flags + marked_italic = "ITALIC" in fs_selection_flags + marked_bold = "BOLD" in fs_selection_flags + + expect_italic = _IsItalic(expected_style) + expect_bold = _IsBold(expected_weight) + # Per Dave C, we should NEVER set oblique, use 0 for italic + expect_oblique = False + + results.append( + ResultMessageTuple( + marked_italic == expect_italic, + "%s %s/%d fsSelection marked_italic %d" + % (font_file, expected_style, expected_weight, marked_italic), + full_font_file, + _FixFsSelectionBit("ITALIC", expect_italic), + ) + ) + results.append( + ResultMessageTuple( + marked_bold == expect_bold, + "%s %s/%d fsSelection marked_bold %d" + % (font_file, expected_style, expected_weight, marked_bold), + full_font_file, + _FixFsSelectionBit("BOLD", expect_bold), + ) + ) + + results.append( + ResultMessageTuple( + marked_oblique == expect_oblique, + "%s %s/%d fsSelection marked_oblique %d" + % (font_file, expected_style, expected_weight, marked_oblique), + full_font_file, + _FixFsSelectionBit("OBLIQUE", expect_oblique), + ) + ) + + # For weight < 300, just confirm weight [250, 300) + # TODO(user): we should also verify ordering is correct + weight_ok = expected_weight == actual_weight + weight_msg = str(expected_weight) + if expected_weight < 300: + weight_ok = actual_weight >= 250 and actual_weight < 300 + weight_msg = "[250, 300)" + + results.append( + ResultMessageTuple( + weight_ok, + "%s %s/%d weight expected: %s usWeightClass: %d" + % (font_file, expected_style, expected_weight, weight_msg, actual_weight), + full_font_file, + _FixWeightClass(expected_weight), + ) + ) + + expected_fs_type = 0 + results.append( + ResultMessageTuple( + expected_fs_type == fs_type, + "%s %s/%d fsType expected: %d fsType: %d" + % (font_file, expected_style, expected_weight, expected_fs_type, fs_type), + full_font_file, + _FixFsType(expected_fs_type), + ) + ) -def _CheckLSB0ForEmptyGlyphs(path, font, ttf): - """Checks if 
font has empty (loca[n] == loca[n+1]) glyphs that have non-0 lsb. - - There is no reason to set such lsb's. - - Args: - path: Path to directory containing font. - font: A font record from a METADATA.pb. - ttf: A fontTools.ttLib.TTFont for the font. - Returns: - A list of ResultMessageTuple for tests performed. - """ - results = [] - if 'loca' not in ttf: return results - for glyph_index, glyph_name in enumerate(ttf.getGlyphOrder()): - is_empty = ttf['loca'][glyph_index] == ttf['loca'][glyph_index + 1] - lsb = ttf['hmtx'][glyph_name][1] - if is_empty and lsb != 0: - results.append( - _SadResult( - '%s %s/%d [\'hmtx\'][\'%s\'][1] (lsb) should be 0 but is %d' % - (font.name, font.style, font.weight, glyph_name, lsb), - os.path.join(path, font.filename), _FixEmptyGlyphLsb(glyph_name))) - return results -def _CheckFontInternalValues(path): - """Validates fonts internal metadata matches METADATA.pb values. +def _CheckFontNameValues(path, name, font, ttf): + """Check sanity of values in the 'name' table. - In particular, checks 'OS/2' {usWeightClass, fsSelection, fsType} and 'name' - {fullName, postScriptName} values. + Specifically the fullname and postScriptName. - Args: - path: A directory containing a METADATA.pb file. - Returns: - A list of ResultMessageTuple, one per validation performed. - """ - results = [] - metadata = fonts.Metadata(path) - name = metadata.name + Args: + path: Path to directory containing font. + name: The name of the family. + font: A font record from a METADATA.pb. + ttf: A fontTools.ttLib.TTFont for the font. + Returns: + A list of ResultMessageTuple for tests performed. + """ + results = [] - for font in metadata.fonts: - font_file = font.filename - with contextlib.closing(ttLib.TTFont(os.path.join(path, font_file))) as ttf: - results.extend(_CheckFontOS2Values(path, font, ttf)) - results.extend(_CheckFontNameValues(path, name, font, ttf)) - results.extend(_CheckLSB0ForEmptyGlyphs(path, font, ttf)) + style = font.style + weight = font.weight + full_font_file = os.path.join(path, font.filename) + + expectations = [ + ("family", fonts.NAME_FAMILY, name), + ("postScriptName", fonts.NAME_PSNAME, fonts.FilenameFor(name, style, weight)), + ("fullName", fonts.NAME_FULLNAME, fonts.FullnameFor(name, style, weight)), + ] + + for friendly_name, name_id, expected in expectations: + # If you have lots of name records they should ALL have the right value + actuals = fonts.ExtractNames(ttf, name_id) + for idx, actual in enumerate(actuals): + results.append( + ResultMessageTuple( + expected == actual, + "%s %s/%d 'name' %s[%d] expected %s, got %s" + % (name, style, weight, friendly_name, idx, expected, actual), + full_font_file, + _FixBadNameRecord(friendly_name, name_id, expected), + ) + ) + + # should have at least one actual + if not actuals: + results.append( + _SadResult( + "%s %s/%d 'name' %s has NO values" + % (name, style, weight, friendly_name), + full_font_file, + _FixMissingNameRecord(friendly_name, name_id, expected), + ) + ) - return results + return results + + +def _CheckLSB0ForEmptyGlyphs(path, font, ttf): + """Checks if font has empty (loca[n] == loca[n+1]) glyphs that have non-0 lsb. + + There is no reason to set such lsb's. + + Args: + path: Path to directory containing font. + font: A font record from a METADATA.pb. + ttf: A fontTools.ttLib.TTFont for the font. + Returns: + A list of ResultMessageTuple for tests performed. 
+ """ + results = [] + if "loca" not in ttf: + return results + for glyph_index, glyph_name in enumerate(ttf.getGlyphOrder()): + is_empty = ttf["loca"][glyph_index] == ttf["loca"][glyph_index + 1] + lsb = ttf["hmtx"][glyph_name][1] + if is_empty and lsb != 0: + results.append( + _SadResult( + "%s %s/%d ['hmtx']['%s'][1] (lsb) should be 0 but is %d" + % (font.name, font.style, font.weight, glyph_name, lsb), + os.path.join(path, font.filename), + _FixEmptyGlyphLsb(glyph_name), + ) + ) + return results + + +def _CheckFontInternalValues(path): + """Validates fonts internal metadata matches METADATA.pb values. + + In particular, checks 'OS/2' {usWeightClass, fsSelection, fsType} and 'name' + {fullName, postScriptName} values. + + Args: + path: A directory containing a METADATA.pb file. + Returns: + A list of ResultMessageTuple, one per validation performed. + """ + results = [] + metadata = fonts.Metadata(path) + name = metadata.name + + for font in metadata.fonts: + font_file = font.filename + with contextlib.closing(ttLib.TTFont(os.path.join(path, font_file))) as ttf: + results.extend(_CheckFontOS2Values(path, font, ttf)) + results.extend(_CheckFontNameValues(path, name, font, ttf)) + results.extend(_CheckLSB0ForEmptyGlyphs(path, font, ttf)) + + return results def _WriteRepairScript(dest_file, results): - with open(dest_file, 'w') as out: - out.write('import collections\n') - out.write('import contextlib\n') - out.write('from fontTools import ttLib\n') - out.write('from google.protobuf.text_format ' - 'import text_format\n') - out.write('from gftools.fonts_public_pb2 import fonts_pb2\n') - out.write('from gftools.fonts_public_pb2 ' - 'import fonts_metadata_pb2\n') - out.write('\n') - - # group by path - by_path = collections.defaultdict(list) - for result in results: - if result.happy or not result.repair_script: - continue - if result.repair_script not in by_path[result.path]: - by_path[result.path].append(result.repair_script) - - for path in sorted(by_path.keys()): - out.write('# repair %s\n' % os.path.basename(path)) - _, ext = os.path.splitext(path) - - prefix = '' - if ext == '.ttf': - prefix = ' ' - out.write( - 'with contextlib.closing(ttLib.TTFont(\'%s\')) as ttf:\n' % path) - elif os.path.isdir(path): - metadata_file = os.path.join(path, 'METADATA.pb') - out.write('metadata = fonts_pb2.FamilyProto()\n') - out.write('with open(\'%s\', \'r\') as f:\n' % metadata_file) - out.write(' text_format.Merge(f.read(), metadata)\n') - else: - raise ValueError('Not sure how to script %s' % path) - - for repair in by_path[path]: - out.write(prefix) - out.write(re.sub('\n', '\n' + prefix, repair)) - out.write('\n') - - if ext == '.ttf': - out.write(' ttf.save(\'%s\')\n' % path) - - if os.path.isdir(path): - out.write('with open(\'%s\', \'w\') as f:\n' % metadata_file) - out.write(' f.write(text_format.MessageToString(metadata))\n') - - out.write('\n') + with open(dest_file, "w") as out: + out.write("import collections\n") + out.write("import contextlib\n") + out.write("from fontTools import ttLib\n") + out.write("from google.protobuf.text_format " "import text_format\n") + out.write("from gftools.fonts_public_pb2 import fonts_pb2\n") + out.write("from gftools.fonts_public_pb2 " "import fonts_metadata_pb2\n") + out.write("\n") + + # group by path + by_path = collections.defaultdict(list) + for result in results: + if result.happy or not result.repair_script: + continue + if result.repair_script not in by_path[result.path]: + by_path[result.path].append(result.repair_script) + + for path in 
sorted(by_path.keys()): + out.write("# repair %s\n" % os.path.basename(path)) + _, ext = os.path.splitext(path) + + prefix = "" + if ext == ".ttf": + prefix = " " + out.write( + "with contextlib.closing(ttLib.TTFont('%s')) as ttf:\n" % path + ) + elif os.path.isdir(path): + metadata_file = os.path.join(path, "METADATA.pb") + out.write("metadata = fonts_pb2.FamilyProto()\n") + out.write("with open('%s', 'r') as f:\n" % metadata_file) + out.write(" text_format.Merge(f.read(), metadata)\n") + else: + raise ValueError("Not sure how to script %s" % path) + + for repair in by_path[path]: + out.write(prefix) + out.write(re.sub("\n", "\n" + prefix, repair)) + out.write("\n") + + if ext == ".ttf": + out.write(" ttf.save('%s')\n" % path) + + if os.path.isdir(path): + out.write("with open('%s', 'w') as f:\n" % metadata_file) + out.write(" f.write(text_format.MessageToString(metadata))\n") + + out.write("\n") def main(argv): - result_code = 0 - all_results = [] - paths = [_DropEmptyPathSegments(os.path.expanduser(p)) for p in argv[1:]] - for path in paths: - if not os.path.isdir(path): - raise ValueError('Not a directory: %s' % path) - - for path in paths: - for font_dir in fonts.FontDirs(path): - results = _SanityCheck(font_dir) - all_results.extend(results) - for result in results: - result_msg = 'pass' - if not result.happy: - result_code = 1 - result_msg = 'FAIL' - if not result.happy or not FLAGS.suppress_pass: - print('%s: %s (%s)' % (result_msg, result.message, font_dir)) - - if FLAGS.repair_script: - _WriteRepairScript(FLAGS.repair_script, all_results) - - sys.exit(result_code) - - -if __name__ == '__main__': - app.run(main) - + result_code = 0 + all_results = [] + paths = [_DropEmptyPathSegments(os.path.expanduser(p)) for p in argv[1:]] + for path in paths: + if not os.path.isdir(path): + raise ValueError("Not a directory: %s" % path) + + for path in paths: + for font_dir in fonts.FontDirs(path): + results = _SanityCheck(font_dir) + all_results.extend(results) + for result in results: + result_msg = "pass" + if not result.happy: + result_code = 1 + result_msg = "FAIL" + if not result.happy or not FLAGS.suppress_pass: + print("%s: %s (%s)" % (result_msg, result.message, font_dir)) + + if FLAGS.repair_script: + _WriteRepairScript(FLAGS.repair_script, all_results) + + sys.exit(result_code) + + +if __name__ == "__main__": + app.run(main) diff --git a/Lib/gftools/scripts/set_primary_script.py b/Lib/gftools/scripts/set_primary_script.py index 78b58ac6..9e91f8cd 100755 --- a/Lib/gftools/scripts/set_primary_script.py +++ b/Lib/gftools/scripts/set_primary_script.py @@ -29,7 +29,9 @@ def main(args=None): import argparse - parser = argparse.ArgumentParser(description='Walk a directory tree and set the primary script') + parser = argparse.ArgumentParser( + description="Walk a directory tree and set the primary script" + ) parser.add_argument("directory") args = parser.parse_args(args) for path_obj in Path(args.directory).rglob("METADATA.pb"): @@ -49,5 +51,6 @@ def main(args=None): WriteMetadata(metadata, path_obj) -if __name__ == '__main__': - main() \ No newline at end of file + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/space_check.py b/Lib/gftools/scripts/space_check.py index 9235bdee..9c060d37 100755 --- a/Lib/gftools/scripts/space_check.py +++ b/Lib/gftools/scripts/space_check.py @@ -24,9 +24,12 @@ def main(): - print("This code has been deprecated; use fontbakery checks\n" - "com.google.fonts/check/whitespace_ink and\n" - "com.google.fonts/check/whitespace_widths 
instead") + print( + "This code has been deprecated; use fontbakery checks\n" + "com.google.fonts/check/whitespace_ink and\n" + "com.google.fonts/check/whitespace_widths instead" + ) -if __name__ == '__main__': - main() + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/tag_noto.py b/Lib/gftools/scripts/tag_noto.py index a2cd4d51..eb5803a6 100755 --- a/Lib/gftools/scripts/tag_noto.py +++ b/Lib/gftools/scripts/tag_noto.py @@ -17,31 +17,33 @@ import re -NOTO_FAMILY_NAME = re.compile(r'^Noto .*') +NOTO_FAMILY_NAME = re.compile(r"^Noto .*") -parser = argparse.ArgumentParser(description='Updates METADATA.pb to add is_noto field to families detected as Noto') -parser.add_argument('--preview', '-p', action='store_true',help='Preview mode') -parser.add_argument('metadata', metavar='METADATA', nargs="+",help='METADATA.pb files') +parser = argparse.ArgumentParser( + description="Updates METADATA.pb to add is_noto field to families detected as Noto" +) +parser.add_argument("--preview", "-p", action="store_true", help="Preview mode") +parser.add_argument("metadata", metavar="METADATA", nargs="+", help="METADATA.pb files") def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - if args.preview: - print('Running in preview mode. No changes will be made.') - print('The names of families detected as part of the Noto') - print('collection will be printed below.') + if args.preview: + print("Running in preview mode. No changes will be made.") + print("The names of families detected as part of the Noto") + print("collection will be printed below.") - for path in args.metadata: - family = fonts.Metadata(path) - if NOTO_FAMILY_NAME.search(family.name): - if args.preview: - print(family.name) - else: - family.is_noto = True - fonts.WriteMetadata(family, path) + for path in args.metadata: + family = fonts.Metadata(path) + if NOTO_FAMILY_NAME.search(family.name): + if args.preview: + print(family.name) + else: + family.is_noto = True + fonts.WriteMetadata(family, path) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/ufo_fix_instances.py b/Lib/gftools/scripts/ufo_fix_instances.py index 68d2cd31..1e7d34c6 100755 --- a/Lib/gftools/scripts/ufo_fix_instances.py +++ b/Lib/gftools/scripts/ufo_fix_instances.py @@ -15,6 +15,7 @@ axis_reg = AxisRegistry() + def build_instances(ds): """Generate Designspace instances which are gf spec complaint""" instances = [] diff --git a/Lib/gftools/scripts/ufo_merge.py b/Lib/gftools/scripts/ufo_merge.py index ccfd6216..3c02d040 100755 --- a/Lib/gftools/scripts/ufo_merge.py +++ b/Lib/gftools/scripts/ufo_merge.py @@ -25,7 +25,9 @@ gs.add_argument("-g", "--glyphs", help="Glyphs to add from UFO 2", default="") gs.add_argument("-G", "--glyphs-file", help="File containing glyphs to add from UFO 2") gs.add_argument( - "-u", "--codepoints", help="Unicode codepoints to add from UFO 2", + "-u", + "--codepoints", + help="Unicode codepoints to add from UFO 2", ) gs.add_argument( "-U", @@ -77,6 +79,7 @@ parser.add_argument("ufo2", help="UFO font file to merge") parser.add_argument("--output", "-o", help="Output UFO font file") + def main(args): args = parser.parse_args(args) if args.replace_existing: @@ -106,7 +109,6 @@ def parse_cp(cp): return int(cp[2:], 16) return int(cp) - glyphs = set() if args.glyphs == "*": glyphs = ufo2.keys() @@ -142,5 +144,5 @@ def parse_cp(cp): ufo1.save(args.output, overwrite=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/Lib/gftools/scripts/ufo_set_order.py b/Lib/gftools/scripts/ufo_set_order.py index a56acecb..8566bc52 100755 --- a/Lib/gftools/scripts/ufo_set_order.py +++ b/Lib/gftools/scripts/ufo_set_order.py @@ -21,7 +21,9 @@ def set_glyph_order(origin, fonts): glyph_order = origin.glyphOrder for font in fonts: if font.glyphOrder != glyph_order: - print(f"Updating {os.path.basename(font.path)} since glyph order is different") + print( + f"Updating {os.path.basename(font.path)} since glyph order is different" + ) font.glyphOrder = glyph_order @@ -30,7 +32,9 @@ def main(args=None): description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument("fonts", nargs="+") - parser.add_argument("--origin", help="Source font to set glyph order for other fonts") + parser.add_argument( + "--origin", help="Source font to set glyph order for other fonts" + ) args = parser.parse_args(args) if len(args.fonts) <= 1: @@ -42,7 +46,7 @@ def main(args=None): fonts = args.fonts[1:] else: origin = args.origin - + fonts = [Font(fp) for fp in args.fonts] origin = Font(origin) @@ -53,4 +57,4 @@ def main(args=None): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/Lib/gftools/scripts/unicode_names.py b/Lib/gftools/scripts/unicode_names.py index 4b5fbb84..97fabd47 100755 --- a/Lib/gftools/scripts/unicode_names.py +++ b/Lib/gftools/scripts/unicode_names.py @@ -24,24 +24,27 @@ import sys import argparse -parser = argparse.ArgumentParser(description='Add Unicode character names to a nam file') -parser.add_argument('--nam_file', help='Location of nam file') +parser = argparse.ArgumentParser( + description="Add Unicode character names to a nam file" +) +parser.add_argument("--nam_file", help="Location of nam file") def main(args=None): - args = parser.parse_args(args) - with open(args.nam_file, 'r') as f: - for line in f: - print(_ReformatLine(line)) + args = parser.parse_args(args) + with open(args.nam_file, "r") as f: + for line in f: + print(_ReformatLine(line)) def _ReformatLine(line): - if line.startswith('0x'): - codepoint = int(line[2:6], 16) # This'll only work for BMP... - out = chr(codepoint) + ' ' + unicodedata.name(chr(codepoint), '') - return '0x%04X %s' % (codepoint, out) - else: - return line - -if __name__ == '__main__': - main() + if line.startswith("0x"): + codepoint = int(line[2:6], 16) # This'll only work for BMP... + out = chr(codepoint) + " " + unicodedata.name(chr(codepoint), "") + return "0x%04X %s" % (codepoint, out) + else: + return line + + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/update_families.py b/Lib/gftools/scripts/update_families.py index 9c1c7d31..96f69817 100755 --- a/Lib/gftools/scripts/update_families.py +++ b/Lib/gftools/scripts/update_families.py @@ -14,72 +14,81 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__author__="The Google Fonts Tools Authors" +__author__ = "The Google Fonts Tools Authors" import argparse import glob import logging import os # set up some command line argument processing -parser = argparse.ArgumentParser(description="Compare TTF files when upgrading families.") -parser.add_argument('arg_filepaths', nargs='+', - help='font file path(s) to check.' - ' Wildcards like *.ttf are allowed.') -parser.add_argument('-v', '--verbose', action='count', default=0, help="increase output verbosity") +parser = argparse.ArgumentParser( + description="Compare TTF files when upgrading families." 
+) +parser.add_argument( + "arg_filepaths", + nargs="+", + help="font file path(s) to check." " Wildcards like *.ttf are allowed.", +) +parser.add_argument( + "-v", "--verbose", action="count", default=0, help="increase output verbosity" +) -#===================================== +# ===================================== # Main sequence of checkers & fixers + def main(args=None): - # set up a basic logging config - # to include timestamps - # log_format = '%(asctime)s %(levelname)-8s %(message)s' - log_format = '%(levelname)-8s %(message)s ' - logger = logging.getLogger() - handler = logging.StreamHandler() - formatter = logging.Formatter(log_format) - handler.setFormatter(formatter) - logger.addHandler(handler) + # set up a basic logging config + # to include timestamps + # log_format = '%(asctime)s %(levelname)-8s %(message)s' + log_format = "%(levelname)-8s %(message)s " + logger = logging.getLogger() + handler = logging.StreamHandler() + formatter = logging.Formatter(log_format) + handler.setFormatter(formatter) + logger.addHandler(handler) - args = parser.parse_args(args) - if args.verbose == 1: - logger.setLevel(logging.INFO) - elif args.verbose >= 2: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.ERROR) + args = parser.parse_args(args) + if args.verbose == 1: + logger.setLevel(logging.INFO) + elif args.verbose >= 2: + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.ERROR) - #------------------------------------------------------ - logging.debug("Checking each file is a ttf") - fonts_to_check = [] - for arg_filepath in sorted(args.arg_filepaths): - # use glob.glob to accept *.ttf - for fullpath in glob.glob(arg_filepath): - file_path, file_name = os.path.split(fullpath) - if file_name.endswith(".ttf"): - logging.debug("Adding '{}'".format(file_name)) - fonts_to_check.append(fullpath) - else: - logging.warning("Skipping '{}' as file is not a ttf".format(file_name)) - fonts_to_check.sort() + # ------------------------------------------------------ + logging.debug("Checking each file is a ttf") + fonts_to_check = [] + for arg_filepath in sorted(args.arg_filepaths): + # use glob.glob to accept *.ttf + for fullpath in glob.glob(arg_filepath): + file_path, file_name = os.path.split(fullpath) + if file_name.endswith(".ttf"): + logging.debug("Adding '{}'".format(file_name)) + fonts_to_check.append(fullpath) + else: + logging.warning("Skipping '{}' as file is not a ttf".format(file_name)) + fonts_to_check.sort() - #------------------------------------------------------ - for new_file in fonts_to_check: - logging.debug("Comparison of filesizes") - old_file = new_file + "-old" - new_filesize = os.path.getsize(new_file) - old_filesize = os.path.getsize(old_file) - delta = new_filesize - old_filesize - percentage = float(delta) / old_filesize - if delta>0: - logging.warning("New font file '{}' is {} bytes larger".format( - new_file, delta)) - elif delta<0: - logging.warning("New font file '{}' is {} bytes smaller".format( - new_file, -delta)) - else: - logging.info("New font file '{}' preserves filesize.".format(new_file)) + # ------------------------------------------------------ + for new_file in fonts_to_check: + logging.debug("Comparison of filesizes") + old_file = new_file + "-old" + new_filesize = os.path.getsize(new_file) + old_filesize = os.path.getsize(old_file) + delta = new_filesize - old_filesize + percentage = float(delta) / old_filesize + if delta > 0: + logging.warning( + "New font file '{}' is {} bytes larger".format(new_file, delta) + ) + elif 
delta < 0: + logging.warning( + "New font file '{}' is {} bytes smaller".format(new_file, -delta) + ) + else: + logging.info("New font file '{}' preserves filesize.".format(new_file)) -if __name__=='__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/update_nameids.py b/Lib/gftools/scripts/update_nameids.py index c86f1539..a92f5711 100755 --- a/Lib/gftools/scripts/update_nameids.py +++ b/Lib/gftools/scripts/update_nameids.py @@ -27,78 +27,68 @@ """ from __future__ import print_function from fontTools.ttLib import TTFont -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter NAME_IDS = { - 0: 'copyright', - 3: 'uniqueid', - 5: 'version', - 7: 'trademark', - 8: 'manufacturer', - 9: 'designer', - 10: 'description', - 11: 'urlvendor', - 12: 'urldesigner', - 13: 'license', - 14: 'urllicense', + 0: "copyright", + 3: "uniqueid", + 5: "version", + 7: "trademark", + 8: "manufacturer", + 9: "designer", + 10: "description", + 11: "urlvendor", + 12: "urldesigner", + 13: "license", + 14: "urllicense", } + def swap_name(field, font_name_field, new_name): - '''Replace a font's name field with a new name''' - enc = font_name_field.getName(*field).getEncoding() - text = font_name_field.getName(*field).toUnicode() - text = new_name - font_name_field.setName(text, *field) + """Replace a font's name field with a new name""" + enc = font_name_field.getName(*field).getEncoding() + text = font_name_field.getName(*field).toUnicode() + text = new_name + font_name_field.setName(text, *field) def update_field(arg, args, fields, nametable): - if hasattr(args, arg): - text = getattr(args, arg) - if text: - swap_name(fields, nametable, text) - - -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('fonts', nargs="+") -parser.add_argument('-c', '--copyright', type=str, - help='Update copyright string') -parser.add_argument('-u', '--uniqueid', type=str, - help='Update uniqueid string') -parser.add_argument('-v', '--version', type=str, - help='Update version string') -parser.add_argument('-t', '--trademark', type=str, - help='Update trademark string') -parser.add_argument('-m', '--manufacturer', type=str, - help='Update manufacturer string') -parser.add_argument('-d', '--designer', type=str, - help='Update designer string') -parser.add_argument('-desc', '--description', type=str, - help='Update description string') -parser.add_argument('-l', '--license', type=str, - help='Update license string') -parser.add_argument('-uv', '--urlvendor', type=str, - help='Update url vendor string') -parser.add_argument('-ud', '--urldesigner', type=str, - help='Update url vendor string') -parser.add_argument('-ul', '--urllicense', type=str, - help='Update url license string') + if hasattr(args, arg): + text = getattr(args, arg) + if text: + swap_name(fields, nametable, text) + + +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("fonts", nargs="+") +parser.add_argument("-c", "--copyright", type=str, help="Update copyright string") +parser.add_argument("-u", "--uniqueid", type=str, help="Update uniqueid string") +parser.add_argument("-v", "--version", type=str, help="Update version string") +parser.add_argument("-t", "--trademark", type=str, help="Update trademark string") +parser.add_argument("-m", "--manufacturer", type=str, help="Update manufacturer string") +parser.add_argument("-d", "--designer", type=str, help="Update 
designer string") +parser.add_argument( + "-desc", "--description", type=str, help="Update description string" +) +parser.add_argument("-l", "--license", type=str, help="Update license string") +parser.add_argument("-uv", "--urlvendor", type=str, help="Update url vendor string") +parser.add_argument("-ud", "--urldesigner", type=str, help="Update url vendor string") +parser.add_argument("-ul", "--urllicense", type=str, help="Update url license string") def main(args=None): - args = parser.parse_args(args) + args = parser.parse_args(args) - for font_path in args.fonts: - font = TTFont(font_path) - for field in font['name'].names: - fields = (field.nameID, field.platformID, field.platEncID, field.langID) - if field.nameID in NAME_IDS: - update_field(NAME_IDS[field.nameID], args, fields, font['name']) + for font_path in args.fonts: + font = TTFont(font_path) + for field in font["name"].names: + fields = (field.nameID, field.platformID, field.platEncID, field.langID) + if field.nameID in NAME_IDS: + update_field(NAME_IDS[field.nameID], args, fields, font["name"]) - font.save(font_path + '.fix') - print('font saved %s.fix' % font_path) + font.save(font_path + ".fix") + print("font saved %s.fix" % font_path) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/update_version.py b/Lib/gftools/scripts/update_version.py index 03498878..6b59533d 100755 --- a/Lib/gftools/scripts/update_version.py +++ b/Lib/gftools/scripts/update_version.py @@ -25,61 +25,47 @@ gftools update-version [fonts] 2.300 2.301 """ from __future__ import print_function -from argparse import (ArgumentParser, - RawTextHelpFormatter) +from argparse import ArgumentParser, RawTextHelpFormatter from fontTools.ttLib import TTFont -parser = ArgumentParser(description=__doc__, - formatter_class=RawTextHelpFormatter) -parser.add_argument('--old_version', - help="Old version number", - required=True, - type=str) -parser.add_argument('--new_version', - help="New Version number", - required=True, - type=str) -parser.add_argument('fonts', - nargs="+", - help="Fonts in OpenType (TTF/OTF) format") +parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) +parser.add_argument("--old_version", help="Old version number", required=True, type=str) +parser.add_argument("--new_version", help="New Version number", required=True, type=str) +parser.add_argument("fonts", nargs="+", help="Fonts in OpenType (TTF/OTF) format") def main(args=None): - args = parser.parse_args(args) - for font_path in args.fonts: - font = TTFont(font_path) + args = parser.parse_args(args) + for font_path in args.fonts: + font = TTFont(font_path) - v_updated = False - for field in font['name'].names: - field_text = field.toUnicode() - if args.old_version in field_text: - updated_text = field_text.replace( - args.old_version, - args.new_version - ) - font['name'].setName( - updated_text, - field.nameID, - field.platformID, - field.platEncID, - field.langID - ) - v_updated = True - if v_updated: - font['head'].fontRevision = float(args.new_version) - print('%s version updated from %s to %s' % ( - font_path, - args.old_version, - args.new_version - )) - font.save(font_path + '.fix') - print('font saved %s.fix' % font_path) - else: - print ('%s skipping. 
Could not find old version number %s' % ( - font_path, - args.old_version - )) + v_updated = False + for field in font["name"].names: + field_text = field.toUnicode() + if args.old_version in field_text: + updated_text = field_text.replace(args.old_version, args.new_version) + font["name"].setName( + updated_text, + field.nameID, + field.platformID, + field.platEncID, + field.langID, + ) + v_updated = True + if v_updated: + font["head"].fontRevision = float(args.new_version) + print( + "%s version updated from %s to %s" + % (font_path, args.old_version, args.new_version) + ) + font.save(font_path + ".fix") + print("font saved %s.fix" % font_path) + else: + print( + "%s skipping. Could not find old version number %s" + % (font_path, args.old_version) + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/varfont_info.py b/Lib/gftools/scripts/varfont_info.py index bd326d74..302a0923 100755 --- a/Lib/gftools/scripts/varfont_info.py +++ b/Lib/gftools/scripts/varfont_info.py @@ -28,41 +28,50 @@ def _ResolveName(ttf, name_id): - if name_id == 0xFFFF: - return '[anonymous]' - names = [n for n in ttf['name'].names if n.nameID == name_id] - if not names: - return '[?nameID=%d?]' % name_id - unicode_names = [n for n in names if n.isUnicode()] - if unicode_names: - return unicode_names[0].toUnicode() - return names[0].toUnicode() + if name_id == 0xFFFF: + return "[anonymous]" + names = [n for n in ttf["name"].names if n.nameID == name_id] + if not names: + return "[?nameID=%d?]" % name_id + unicode_names = [n for n in names if n.isUnicode()] + if unicode_names: + return unicode_names[0].toUnicode() + return names[0].toUnicode() def main(args=None): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('fonts', metavar="TTF", nargs="+", - help="Fonts in OpenType (TTF/OTF) format") + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "fonts", metavar="TTF", nargs="+", help="Fonts in OpenType (TTF/OTF) format" + ) - args = parser.parse_args(args) - for filename in args.fonts: - with contextlib.closing(ttLib.TTFont(filename)) as ttf: - print(filename) - if 'fvar' not in ttf: - print("This font file lacks an 'fvar' table.") - else: - fvar = ttf['fvar'] - print(' axes') - axes = [(a.axisTag, a.minValue, a.defaultValue, a.maxValue) - for a in fvar.axes] - for tag, minv, defv, maxv in axes: - print(" '%s' %d-%d, default %d" % (tag, minv, maxv, defv)) + args = parser.parse_args(args) + for filename in args.fonts: + with contextlib.closing(ttLib.TTFont(filename)) as ttf: + print(filename) + if "fvar" not in ttf: + print("This font file lacks an 'fvar' table.") + else: + fvar = ttf["fvar"] + print(" axes") + axes = [ + (a.axisTag, a.minValue, a.defaultValue, a.maxValue) + for a in fvar.axes + ] + for tag, minv, defv, maxv in axes: + print(" '%s' %d-%d, default %d" % (tag, minv, maxv, defv)) - if fvar.instances: - print(' named-instances') - for inst in fvar.instances: - print(' %s %s' % (_ResolveName(ttf, inst.postscriptNameID), - inst.coordinates)) + if fvar.instances: + print(" named-instances") + for inst in fvar.instances: + print( + " %s %s" + % ( + _ResolveName(ttf, inst.postscriptNameID), + inst.coordinates, + ) + ) -if __name__ == '__main__': - main() + +if __name__ == "__main__": + main() diff --git a/Lib/gftools/scripts/what_subsets.py b/Lib/gftools/scripts/what_subsets.py index 9bc80b78..5117007c 100755 --- a/Lib/gftools/scripts/what_subsets.py +++ b/Lib/gftools/scripts/what_subsets.py @@ 
-24,20 +24,30 @@ from gfsubsets import SubsetsInFont parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument('--min_pct', type=int, default=0, help='What percentage of subset codepoints have to be supported' - ' for a non-ext subset.') -parser.add_argument('--min_pct_ext', type=int, default=0, help='What percentage of subset codepoints have to be supported' - ' for an -ext subset.') -parser.add_argument('fonts', nargs='+', metavar="FONT") +parser.add_argument( + "--min_pct", + type=int, + default=0, + help="What percentage of subset codepoints have to be supported" + " for a non-ext subset.", +) +parser.add_argument( + "--min_pct_ext", + type=int, + default=0, + help="What percentage of subset codepoints have to be supported" + " for an -ext subset.", +) +parser.add_argument("fonts", nargs="+", metavar="FONT") def main(args=None): - args = parser.parse_args(args) - for arg in args.fonts: - subsets = SubsetsInFont(arg, args.min_pct, args.min_pct_ext) - for (subset, available, total) in subsets: - print('%s %s %d/%d' % (os.path.basename(arg), subset, available, total)) + args = parser.parse_args(args) + for arg in args.fonts: + subsets = SubsetsInFont(arg, args.min_pct, args.min_pct_ext) + for subset, available, total in subsets: + print("%s %s %d/%d" % (os.path.basename(arg), subset, available, total)) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/Lib/gftools/stat.py b/Lib/gftools/stat.py index def34bbd..997419e6 100644 --- a/Lib/gftools/stat.py +++ b/Lib/gftools/stat.py @@ -26,10 +26,11 @@ def gen_stat_tables(ttFonts): - from axisregistry import build_stat - for ttFont in ttFonts: - siblings = [f for f in ttFonts if f != ttFont] - build_stat(ttFont, siblings) + from axisregistry import build_stat + + for ttFont in ttFonts: + siblings = [f for f in ttFonts if f != ttFont] + build_stat(ttFont, siblings) def gen_stat_tables_from_config(stat, varfonts, has_italic=None, locations=None): @@ -89,12 +90,14 @@ def gen_stat_tables_from_config(stat, varfonts, has_italic=None, locations=None) if ax["name"] == "ital": raise ValueError("ital axis should not appear in stat config") ital_stat_for_roman = { - "name": "Italic", "tag": "ital", - "values": [dict(value=0, name="Roman", flags=0x2, linkedValue=1)] + "name": "Italic", + "tag": "ital", + "values": [dict(value=0, name="Roman", flags=0x2, linkedValue=1)], } ital_stat_for_italic = { - "name": "Italic", "tag": "ital", - "values": [dict(value=1, name="Italic")] + "name": "Italic", + "tag": "ital", + "values": [dict(value=1, name="Italic")], } stat.append({}) # We will switch this entry between Roman and Italic diff --git a/Lib/gftools/subsetmerger.py b/Lib/gftools/subsetmerger.py index 5697411c..d8be30dd 100644 --- a/Lib/gftools/subsetmerger.py +++ b/Lib/gftools/subsetmerger.py @@ -42,7 +42,9 @@ { "from": Enum(SUBSET_SOURCES.keys()) | Map({"repo": Str(), "path": Str()}), Optional("name"): Str(), - Optional("ranges"): Seq(Map({"start": (HexInt() | Int()), "end": (HexInt() | Int())})), + Optional("ranges"): Seq( + Map({"start": (HexInt() | Int()), "end": (HexInt() | Int())}) + ), Optional("layoutHandling"): Str(), Optional("force"): Str(), } @@ -88,7 +90,13 @@ def prepare_minimal_subsets(subsets): class SubsetMerger: def __init__( - self, input_ds, output_ds, subsets, googlefonts=False, cache="../subset-files", json=False + self, + input_ds, + output_ds, + subsets, + googlefonts=False, + cache="../subset-files", + json=False, ): self.input = input_ds self.output = output_ds diff --git 
a/Lib/gftools/util/styles.py b/Lib/gftools/util/styles.py index a1b48861..b321f9ac 100644 --- a/Lib/gftools/util/styles.py +++ b/Lib/gftools/util/styles.py @@ -1,62 +1,63 @@ - -STYLE_NAMES = ["Thin", - "ExtraLight", - "Light", - "Regular", - "Medium", - "SemiBold", - "Bold", - "ExtraBold", - "Black", - "Thin Italic", - "ExtraLight Italic", - "Light Italic", - "Italic", - "Medium Italic", - "SemiBold Italic", - "Bold Italic", - "ExtraBold Italic", - "Black Italic"] - -RIBBI_STYLE_NAMES = ["Regular", - "Italic", - "Bold", - "BoldItalic"] +STYLE_NAMES = [ + "Thin", + "ExtraLight", + "Light", + "Regular", + "Medium", + "SemiBold", + "Bold", + "ExtraBold", + "Black", + "Thin Italic", + "ExtraLight Italic", + "Light Italic", + "Italic", + "Medium Italic", + "SemiBold Italic", + "Bold Italic", + "ExtraBold Italic", + "Black Italic", +] + +RIBBI_STYLE_NAMES = ["Regular", "Italic", "Bold", "BoldItalic"] def get_stylename(filename): - filename_base = filename.split('.')[0] - return filename_base.split('-')[-1] + filename_base = filename.split(".")[0] + return filename_base.split("-")[-1] + def _familyname(filename): - filename_base = filename.split('.')[0] - names = filename_base.split('-') - names.pop() - return '-'.join(names) + filename_base = filename.split(".")[0] + names = filename_base.split("-") + names.pop() + return "-".join(names) + def is_italic(stylename): - return 'Italic' in stylename + return "Italic" in stylename def is_regular(stylename): - return ("Regular" in stylename or - (stylename in STYLE_NAMES and - stylename not in RIBBI_STYLE_NAMES and - "Italic" not in stylename)) + return "Regular" in stylename or ( + stylename in STYLE_NAMES + and stylename not in RIBBI_STYLE_NAMES + and "Italic" not in stylename + ) def is_bold(stylename): - return stylename in ["Bold", "BoldItalic"] + return stylename in ["Bold", "BoldItalic"] def is_filename_canonical(filename): - if '-' not in filename: - return False - else: - style = get_stylename(filename) - for valid in STYLE_NAMES: - valid = ''.join(valid.split(' ')) - if style == valid: - return True - # otherwise: - return False + if "-" not in filename: + return False + else: + style = get_stylename(filename) + for valid in STYLE_NAMES: + valid = "".join(valid.split(" ")) + if style == valid: + return True + # otherwise: + return False diff --git a/Lib/gftools/util/udhr.py b/Lib/gftools/util/udhr.py index 991e4739..87869e19 100644 --- a/Lib/gftools/util/udhr.py +++ b/Lib/gftools/util/udhr.py @@ -2,239 +2,251 @@ import enum import re -class Udhr(): - - def __init__(self, key, iso639_3, iso15924, bcp47, direction, ohchr, stage, loc, name): - self.key = key - self.iso639_3 = iso639_3 - self.iso15924 = iso15924 - self.bcp47 = bcp47 - self.direction = direction - self.ohchr = ohchr - self.stage = stage - self.loc = loc - self.name = name - - self.title = None - self.preamble = None - self.articles = [] - - def Parse(self, translation_data): - if translation_data is None or self.stage < 2: - return - - if translation_data.find('./{*}title') is not None: - self.title = translation_data.find('./{*}title').text - - preamble_data = translation_data.find('./{*}preamble') - if preamble_data is not None: - if preamble_data.find('./{*}title') is not None: - self.preamble = { - 'title': - preamble_data.find('./{*}title').text, - 'content': [ - para.text for para in preamble_data.findall('./{*}para') - ], - } - - articles_data = translation_data.findall('./{*}article') - for article_data in articles_data: - title_data = article_data.find('./{*}title') - 
article = { - 'id': - int(article_data.get('number')), - 'title': None if title_data is None else title_data.text, - 'content': [ - para.text for para in article_data.findall('./{*}para') - ], - } - self.articles.append(article) - - def LoadArticleOne(self, article_one): - self.articles.append({'id': 0, 'title': None, 'content': [article_one]}) - - def GetSampleTexts(self): - extractor = SampleTextExtractor(self) - return extractor.GetSampleTexts() - - -class SampleTextExtractor(): - - class TextType(enum.Enum): - GLYPHS = 1 - WORD = 2 - PHRASE = 3 - SENTENCE = 4 - PARAGRAPH = 5 - PASSAGE = 6 - - def __init__(self, udhr): - self._udhr = udhr - self._glyphs = iter(self._GetGlyphs()) - self._words = iter(self._GetWords()) - self._paragraphs = iter(self._GetParagraphs()) - self._phrase_history = set() - - self._non_word_regex = re.compile(r'[^\w]+') - self._space_regex = re.compile(r'\s+') - self._non_space_regex = re.compile(r'[^\s]+') - self._non_word_space_regex = re.compile(r'[^\w\s]+') - self._any_regex = re.compile(r'.') - - def _DisplayLength(self, s): - """Returns length of given string. Omits combining characters. - - Some entire scripts will not be counted; in those cases, the raw length of - the string is returned. - """ - word_space_length = len(self._non_word_space_regex.sub('', s)) - space_length = len(self._non_space_regex.sub('', s)) - if word_space_length == space_length: - return len(s) - return word_space_length - - def _GetGlyphs(self): - seen = set() - for article in self._udhr.articles: - for para in article['content']: - for ch in (self._non_word_regex.sub('', para) or self._space_regex.sub('', para)): - ch = ch.lower() - if ch not in seen: - seen.add(ch) - yield ch - - def _GetWords(self): - if self._space_regex.search(self._udhr.articles[0]['content'][0]) is not None: - splitter = self._space_regex - else: - splitter = self._non_word_regex - - seen = set() - for article in self._udhr.articles: - for para in article['content']: - for s in splitter.split(para): - if s not in seen: - seen.add(s) - yield s - - def _GetParagraphs(self): - if self._udhr.preamble is not None: - for para in self._udhr.preamble['content']: - yield para - for article in self._udhr.articles: - for para in article['content']: - yield para - - def _ExtractGlyphs(self, min_chars, max_chars): - s = '' - for ch in self._glyphs: - s += ch.upper() - if len(s) >= min_chars: - break - if ch != ch.upper(): - s += ch - if len(s) >= min_chars: - break - return s - - def _ExtractWord(self, min_chars, max_chars): - for iterator in [self._words, self._GetWords()]: - for w in iterator: - if w is None: - continue - if min_chars <= self._DisplayLength(w) <= max_chars: - return w - # Fallback to using multiple words for languages with very small words - return self._ExtractPhrase(min_chars, max_chars) - - def _ExtractPhrase(self, min_chars, max_chars): - for iterator in [self._paragraphs, self._GetParagraphs()]: - for para in iterator: - if para is None: - continue - for regex in [self._any_regex, self._space_regex, self._non_word_regex]: - breaks = [-1] - for match in regex.finditer(para, min_chars): - breaks.append(match.start()) - phrase = para[breaks[0]+1:breaks[len(breaks)-1]] - p_size = self._DisplayLength(phrase) - while p_size > max_chars and len(breaks) > 1: - breaks.pop() - phrase = para[breaks[0]+1:breaks[len(breaks)-1]] - p_size = self._DisplayLength(phrase) - if min_chars <= p_size and phrase not in self._phrase_history: - self._phrase_history.add(phrase) - return phrase - return 
self._ExtractParagraph(min_chars, max_chars) - - def _ExtractSentence(self, min_chars, max_chars): - # Sentence delimination may differ between scripts, so tokenizing on spaces - # would be unreliable. Prefer to use _ExtractPhrase. - return self._ExtractPhrase(min_chars, max_chars) - - def _ExtractParagraph(self, min_chars, max_chars): - for iterator in [self._paragraphs, self._GetParagraphs()]: - for para in iterator: - if para is None: - continue - if min_chars <= self._DisplayLength(para) <= max_chars: - return para - # Paragraphs likely insufficient length; try combining into passages - return self._ExtractPassage(min_chars, max_chars) - - def _ExtractPassage(self, min_chars, max_chars): - p = [] - p_size = 0 - while p_size < min_chars: - for iterator in [self._paragraphs, self._GetParagraphs()]: - for para in iterator: - if para is None: - continue - p.append(para) - p_size = self._DisplayLength(' '.join(p)) - if max_chars < p_size: - p.pop() - elif min_chars <= p_size: - return '\n'.join(p) - assert len(p) > 0, 'Unable to extract passage: ' + self._udhr.key - if len(p) == 0: - p.append([p for p in self._GetParagraphs()][0]) - return '\n'.join(p) - - def _Get(self, text_type, **kwargs): - if 'char_count' in kwargs: - min_chars = kwargs['char_count'] - max_chars = kwargs['char_count'] - else: - min_chars = kwargs['min_chars'] - max_chars = kwargs['max_chars'] - if text_type == self.TextType.GLYPHS: - return self._ExtractGlyphs(min_chars, max_chars) - if text_type == self.TextType.WORD: - return self._ExtractWord(min_chars, max_chars) - if text_type == self.TextType.PHRASE: - return self._ExtractPhrase(min_chars, max_chars) - if text_type == self.TextType.SENTENCE: - return self._ExtractSentence(min_chars, max_chars) - if text_type == self.TextType.PARAGRAPH: - return self._ExtractParagraph(min_chars, max_chars) - if text_type == self.TextType.PASSAGE: - return self._ExtractPassage(min_chars, max_chars) - raise Exception('Unsupported text type: ' + text_type) - - def GetSampleTexts(self): - sample_text = fonts_public_pb2.SampleTextProto() - sample_text.masthead_full = self._Get(self.TextType.GLYPHS, char_count = 4) - sample_text.masthead_partial = self._Get(self.TextType.GLYPHS, char_count = 2) - sample_text.styles = self._Get(self.TextType.PHRASE, min_chars = 40, max_chars = 60) - sample_text.tester = self._Get(self.TextType.PHRASE, min_chars = 60, max_chars = 90) - sample_text.poster_sm = self._Get(self.TextType.PHRASE, min_chars = 10, max_chars = 17) - sample_text.poster_md = self._Get(self.TextType.PHRASE, min_chars = 6, max_chars = 12) - sample_text.poster_lg = self._Get(self.TextType.WORD, min_chars = 3, max_chars = 8) - sample_text.specimen_48 = self._Get(self.TextType.SENTENCE, min_chars = 50, max_chars = 80) - sample_text.specimen_36 = self._Get(self.TextType.PARAGRAPH, min_chars = 100, max_chars = 120) - sample_text.specimen_32 = self._Get(self.TextType.PARAGRAPH, min_chars = 140, max_chars = 180) - sample_text.specimen_21 = self._Get(self.TextType.PASSAGE, min_chars = 300, max_chars = 500) - sample_text.specimen_16 = self._Get(self.TextType.PASSAGE, min_chars = 550, max_chars = 750) - return sample_text +class Udhr: + def __init__( + self, key, iso639_3, iso15924, bcp47, direction, ohchr, stage, loc, name + ): + self.key = key + self.iso639_3 = iso639_3 + self.iso15924 = iso15924 + self.bcp47 = bcp47 + self.direction = direction + self.ohchr = ohchr + self.stage = stage + self.loc = loc + self.name = name + + self.title = None + self.preamble = None + self.articles = [] + + 
def Parse(self, translation_data): + if translation_data is None or self.stage < 2: + return + + if translation_data.find("./{*}title") is not None: + self.title = translation_data.find("./{*}title").text + + preamble_data = translation_data.find("./{*}preamble") + if preamble_data is not None: + if preamble_data.find("./{*}title") is not None: + self.preamble = { + "title": preamble_data.find("./{*}title").text, + "content": [ + para.text for para in preamble_data.findall("./{*}para") + ], + } + + articles_data = translation_data.findall("./{*}article") + for article_data in articles_data: + title_data = article_data.find("./{*}title") + article = { + "id": int(article_data.get("number")), + "title": None if title_data is None else title_data.text, + "content": [para.text for para in article_data.findall("./{*}para")], + } + self.articles.append(article) + + def LoadArticleOne(self, article_one): + self.articles.append({"id": 0, "title": None, "content": [article_one]}) + + def GetSampleTexts(self): + extractor = SampleTextExtractor(self) + return extractor.GetSampleTexts() + + +class SampleTextExtractor: + class TextType(enum.Enum): + GLYPHS = 1 + WORD = 2 + PHRASE = 3 + SENTENCE = 4 + PARAGRAPH = 5 + PASSAGE = 6 + + def __init__(self, udhr): + self._udhr = udhr + self._glyphs = iter(self._GetGlyphs()) + self._words = iter(self._GetWords()) + self._paragraphs = iter(self._GetParagraphs()) + self._phrase_history = set() + + self._non_word_regex = re.compile(r"[^\w]+") + self._space_regex = re.compile(r"\s+") + self._non_space_regex = re.compile(r"[^\s]+") + self._non_word_space_regex = re.compile(r"[^\w\s]+") + self._any_regex = re.compile(r".") + + def _DisplayLength(self, s): + """Returns length of given string. Omits combining characters. + + Some entire scripts will not be counted; in those cases, the raw length of + the string is returned. 
+ """ + word_space_length = len(self._non_word_space_regex.sub("", s)) + space_length = len(self._non_space_regex.sub("", s)) + if word_space_length == space_length: + return len(s) + return word_space_length + + def _GetGlyphs(self): + seen = set() + for article in self._udhr.articles: + for para in article["content"]: + for ch in self._non_word_regex.sub("", para) or self._space_regex.sub( + "", para + ): + ch = ch.lower() + if ch not in seen: + seen.add(ch) + yield ch + + def _GetWords(self): + if self._space_regex.search(self._udhr.articles[0]["content"][0]) is not None: + splitter = self._space_regex + else: + splitter = self._non_word_regex + + seen = set() + for article in self._udhr.articles: + for para in article["content"]: + for s in splitter.split(para): + if s not in seen: + seen.add(s) + yield s + + def _GetParagraphs(self): + if self._udhr.preamble is not None: + for para in self._udhr.preamble["content"]: + yield para + for article in self._udhr.articles: + for para in article["content"]: + yield para + + def _ExtractGlyphs(self, min_chars, max_chars): + s = "" + for ch in self._glyphs: + s += ch.upper() + if len(s) >= min_chars: + break + if ch != ch.upper(): + s += ch + if len(s) >= min_chars: + break + return s + + def _ExtractWord(self, min_chars, max_chars): + for iterator in [self._words, self._GetWords()]: + for w in iterator: + if w is None: + continue + if min_chars <= self._DisplayLength(w) <= max_chars: + return w + # Fallback to using multiple words for languages with very small words + return self._ExtractPhrase(min_chars, max_chars) + + def _ExtractPhrase(self, min_chars, max_chars): + for iterator in [self._paragraphs, self._GetParagraphs()]: + for para in iterator: + if para is None: + continue + for regex in [self._any_regex, self._space_regex, self._non_word_regex]: + breaks = [-1] + for match in regex.finditer(para, min_chars): + breaks.append(match.start()) + phrase = para[breaks[0] + 1 : breaks[len(breaks) - 1]] + p_size = self._DisplayLength(phrase) + while p_size > max_chars and len(breaks) > 1: + breaks.pop() + phrase = para[breaks[0] + 1 : breaks[len(breaks) - 1]] + p_size = self._DisplayLength(phrase) + if min_chars <= p_size and phrase not in self._phrase_history: + self._phrase_history.add(phrase) + return phrase + return self._ExtractParagraph(min_chars, max_chars) + + def _ExtractSentence(self, min_chars, max_chars): + # Sentence delimination may differ between scripts, so tokenizing on spaces + # would be unreliable. Prefer to use _ExtractPhrase. 
+ return self._ExtractPhrase(min_chars, max_chars) + + def _ExtractParagraph(self, min_chars, max_chars): + for iterator in [self._paragraphs, self._GetParagraphs()]: + for para in iterator: + if para is None: + continue + if min_chars <= self._DisplayLength(para) <= max_chars: + return para + # Paragraphs likely insufficient length; try combining into passages + return self._ExtractPassage(min_chars, max_chars) + + def _ExtractPassage(self, min_chars, max_chars): + p = [] + p_size = 0 + while p_size < min_chars: + for iterator in [self._paragraphs, self._GetParagraphs()]: + for para in iterator: + if para is None: + continue + p.append(para) + p_size = self._DisplayLength(" ".join(p)) + if max_chars < p_size: + p.pop() + elif min_chars <= p_size: + return "\n".join(p) + assert len(p) > 0, "Unable to extract passage: " + self._udhr.key + if len(p) == 0: + p.append([p for p in self._GetParagraphs()][0]) + return "\n".join(p) + + def _Get(self, text_type, **kwargs): + if "char_count" in kwargs: + min_chars = kwargs["char_count"] + max_chars = kwargs["char_count"] + else: + min_chars = kwargs["min_chars"] + max_chars = kwargs["max_chars"] + if text_type == self.TextType.GLYPHS: + return self._ExtractGlyphs(min_chars, max_chars) + if text_type == self.TextType.WORD: + return self._ExtractWord(min_chars, max_chars) + if text_type == self.TextType.PHRASE: + return self._ExtractPhrase(min_chars, max_chars) + if text_type == self.TextType.SENTENCE: + return self._ExtractSentence(min_chars, max_chars) + if text_type == self.TextType.PARAGRAPH: + return self._ExtractParagraph(min_chars, max_chars) + if text_type == self.TextType.PASSAGE: + return self._ExtractPassage(min_chars, max_chars) + raise Exception("Unsupported text type: " + text_type) + + def GetSampleTexts(self): + sample_text = fonts_public_pb2.SampleTextProto() + sample_text.masthead_full = self._Get(self.TextType.GLYPHS, char_count=4) + sample_text.masthead_partial = self._Get(self.TextType.GLYPHS, char_count=2) + sample_text.styles = self._Get(self.TextType.PHRASE, min_chars=40, max_chars=60) + sample_text.tester = self._Get(self.TextType.PHRASE, min_chars=60, max_chars=90) + sample_text.poster_sm = self._Get( + self.TextType.PHRASE, min_chars=10, max_chars=17 + ) + sample_text.poster_md = self._Get( + self.TextType.PHRASE, min_chars=6, max_chars=12 + ) + sample_text.poster_lg = self._Get(self.TextType.WORD, min_chars=3, max_chars=8) + sample_text.specimen_48 = self._Get( + self.TextType.SENTENCE, min_chars=50, max_chars=80 + ) + sample_text.specimen_36 = self._Get( + self.TextType.PARAGRAPH, min_chars=100, max_chars=120 + ) + sample_text.specimen_32 = self._Get( + self.TextType.PARAGRAPH, min_chars=140, max_chars=180 + ) + sample_text.specimen_21 = self._Get( + self.TextType.PASSAGE, min_chars=300, max_chars=500 + ) + sample_text.specimen_16 = self._Get( + self.TextType.PASSAGE, min_chars=550, max_chars=750 + ) + return sample_text diff --git a/Lib/gftools/utils.py b/Lib/gftools/utils.py index c8e6db6b..90fd6a39 100644 --- a/Lib/gftools/utils.py +++ b/Lib/gftools/utils.py @@ -40,6 +40,7 @@ from collections import Counter from collections import defaultdict from pathlib import Path + if sys.version_info[0] == 3: from configparser import ConfigParser else: @@ -49,14 +50,16 @@ # ===================================== # HELPER FUNCTIONS -PROD_FAMILY_DOWNLOAD = 'https://fonts.google.com/download?family={}' +PROD_FAMILY_DOWNLOAD = "https://fonts.google.com/download?family={}" -def download_family_from_Google_Fonts(family, dst=None, 
dl_url=PROD_FAMILY_DOWNLOAD, ignore_static=True): +def download_family_from_Google_Fonts( + family, dst=None, dl_url=PROD_FAMILY_DOWNLOAD, ignore_static=True +): """Download a font family from Google Fonts""" # TODO (M Foley) update all dl_urls in .ini files. dl_url = dl_url.replace("download?family=", "download/list?family=") - url = dl_url.format(family.replace(' ', '%20')) + url = dl_url.format(family.replace(" ", "%20")) data = json.loads(requests.get(url).text[5:]) res = [] for item in data["manifest"]["fileRefs"]: @@ -155,13 +158,11 @@ def download_files_in_github_pr( dirs = set([os.path.dirname(p.filename) for p in files]) results = [] for d in dirs: - if ignore_static_dir and '/static' in d: + if ignore_static_dir and "/static" in d: continue url = os.path.join( - pull.head.repo.html_url, - "tree", - pull.head.ref, # head branch - d) + pull.head.repo.html_url, "tree", pull.head.ref, d # head branch + ) results += download_files_in_github_dir(url, dst, overwrite=False) return results @@ -181,12 +182,7 @@ def download_files_in_github_pr( return results -def download_files_in_github_dir( - orig_url, - dst, - filter_files=[], - overwrite=True -): +def download_files_in_github_dir(orig_url, dst, filter_files=[], overwrite=True): """Download files in a github dir e.g https://github.com/google/fonts/tree/main/ofl/abhayalibre @@ -241,7 +237,7 @@ def download_file(url, dst_path=None): request = requests.get(url, stream=True) if not dst_path: return BytesIO(request.content) - with open(dst_path, 'wb') as downloaded_file: + with open(dst_path, "wb") as downloaded_file: downloaded_file.write(request.content) @@ -296,7 +292,7 @@ def _html_custom_formatter(string): strings = string.split("\n") # Cycle through list to find abbreviations for i in range(1, len(strings)): - this_line = strings[i-1] + this_line = strings[i - 1] next_line = strings[i] if this_line == "": continue @@ -313,7 +309,7 @@ def _html_custom_formatter(string): and next_line[1] == next_line[1].lower() ) # H.R. Giger ): - strings[i-1] = strings[i-1] + strings[i] + strings[i - 1] = strings[i - 1] + strings[i] strings[i] = "" # Join back together string = "\n".join(strings) @@ -328,6 +324,7 @@ def format_html(html): ## Font-related utility functions + def font_stylename(ttFont): """Get a font's stylename using the name table. Since our fonts use the RIBBI naming model, use the Typographic SubFamily Name (NAmeID 17) if it @@ -416,62 +413,66 @@ def _font_version(font, platEncLang=(3, 1, 0x409)): def partition_cmap(font, test, report=True): - """Drops all cmap tables from the font which do not pass the supplied test. - - Arguments: - font: A ``TTFont`` instance - test: A function which takes a cmap table and returns True if it should - be kept or False if it should be removed from the font. - report: Reports to stdout which tables were dropped and which were kept. 
- - Returns two lists: a list of `fontTools.ttLib.tables._c_m_a_p.*` objects - which were kept in the font, and a list of those which were removed.""" - keep = [] - drop = [] - - for index, table in enumerate(font['cmap'].tables): - if test(table): - keep.append(table) - else: - drop.append(table) - - if report: - for table in keep: - print(("Keeping format {} cmap subtable with Platform ID = {}" - " and Encoding ID = {}").format(table.format, - table.platformID, - table.platEncID)) - for table in drop: - print(("--- Removed format {} cmap subtable with Platform ID = {}" - " and Encoding ID = {} ---").format(table.format, - table.platformID, - table.platEncID)) - - font['cmap'].tables = keep - return keep, drop + """Drops all cmap tables from the font which do not pass the supplied test. + + Arguments: + font: A ``TTFont`` instance + test: A function which takes a cmap table and returns True if it should + be kept or False if it should be removed from the font. + report: Reports to stdout which tables were dropped and which were kept. + + Returns two lists: a list of `fontTools.ttLib.tables._c_m_a_p.*` objects + which were kept in the font, and a list of those which were removed.""" + keep = [] + drop = [] + + for index, table in enumerate(font["cmap"].tables): + if test(table): + keep.append(table) + else: + drop.append(table) + + if report: + for table in keep: + print( + ( + "Keeping format {} cmap subtable with Platform ID = {}" + " and Encoding ID = {}" + ).format(table.format, table.platformID, table.platEncID) + ) + for table in drop: + print( + ( + "--- Removed format {} cmap subtable with Platform ID = {}" + " and Encoding ID = {} ---" + ).format(table.format, table.platformID, table.platEncID) + ) + + font["cmap"].tables = keep + return keep, drop def _unicode_marks(string): - unicodemap = [(u'©', '(c)'), (u'®', '(r)'), (u'™', '(tm)')] + unicodemap = [("©", "(c)"), ("®", "(r)"), ("™", "(tm)")] return filter(lambda char: char[0] in string, unicodemap) def normalize_unicode_marks(string): - """ Converts special characters like copyright, - trademark signs to ascii name """ + """Converts special characters like copyright, + trademark signs to ascii name""" # print("input: '{}'".format(string)) input_string = string for mark, ascii_repl in _unicode_marks(string): string = string.replace(mark, ascii_repl) rv = [] -# for c in unicodedata.normalize('NFKC', smart_text(string)): - for c in unicodedata.normalize('NFKC', string): + # for c in unicodedata.normalize('NFKC', smart_text(string)): + for c in unicodedata.normalize("NFKC", string): # cat = unicodedata.category(c)[0] # if cat in 'LN' or c in ok: rv.append(c) - new = ''.join(rv).strip() + new = "".join(rv).strip() result = unidecode(new) if result != input_string: print("Fixed string: '{}'".format(result)) @@ -479,11 +480,11 @@ def normalize_unicode_marks(string): def get_fsSelection_byte2(ttfont): - return ttfont['OS/2'].fsSelection >> 8 + return ttfont["OS/2"].fsSelection >> 8 def get_fsSelection_byte1(ttfont): - return ttfont['OS/2'].fsSelection & 255 + return ttfont["OS/2"].fsSelection & 255 def get_encoded_glyphs(ttFont): @@ -492,8 +493,8 @@ def get_encoded_glyphs(ttFont): def get_unencoded_glyphs(font): - """ Check if font has unencoded glyphs """ - cmap = font['cmap'] + """Check if font has unencoded glyphs""" + cmap = font["cmap"] new_cmap = cmap.getcmap(3, 10) if not new_cmap: @@ -505,9 +506,8 @@ def get_unencoded_glyphs(font): if not new_cmap: return [] - diff = list(set(font.getGlyphOrder()) - - 
set(new_cmap.cmap.values()) - {'.notdef'}) - return [g for g in diff[:] if g != '.notdef'] + diff = list(set(font.getGlyphOrder()) - set(new_cmap.cmap.values()) - {".notdef"}) + return [g for g in diff[:] if g != ".notdef"] def has_mac_names(ttfont): @@ -515,7 +515,7 @@ field values: platformID: 1, encodingID: 0, LanguageID: 0""" for i in range(255): - if ttfont['name'].getName(i, 1, 0, 0): + if ttfont["name"].getName(i, 1, 0, 0): return True return False @@ -537,6 +537,7 @@ def font_sample_text(ttFont): cmap = set(ttFont.getBestCmap()) words = [] seen_chars = set() + def _add_words(words, text, seen_chars): for word in text.split(): chars = set(ord(l) for l in word) @@ -574,27 +575,31 @@ def parse_axis_dflts(string): axes = string.split() res = {} for axis in axes: - k,v = axis.split("=") + k, v = axis.split("=") res[k] = float(v) return res def remove_url_prefix(url): """https://www.google.com --> google.com""" - pattern = r'(https?://)?(www\.)?' - cleaned_url = re.sub(pattern, '', url) + pattern = r"(https?://)?(www\.)?" + cleaned_url = re.sub(pattern, "", url) return cleaned_url def primary_script(ttFont, ignore_latin=True): - g = classifyGlyphs(lambda uv:list(ftunicodedata.script_extension(chr(uv))), ttFont.getBestCmap(), gsub=ttFont.get("GSUB")) + g = classifyGlyphs( + lambda uv: list(ftunicodedata.script_extension(chr(uv))), + ttFont.getBestCmap(), + gsub=ttFont.get("GSUB"), + ) badkeys = ["Zinh", "Zyyy", "Zzzz"] if ignore_latin: badkeys.append("Latn") for badkey in badkeys: if badkey in g: del g[badkey] - script_count = Counter({k:len(v) for k,v in g.items()}) + script_count = Counter({k: len(v) for k, v in g.items()}) # If there isn't a clear winner, give up if ( @@ -615,7 +620,7 @@ def autovivification(items): if isinstance(items, (float, int, str, bool)): return items d = defaultdict(lambda: defaultdict(defaultdict)) - d.update({k: autovivification(v) for k,v in items.items()}) + d.update({k: autovivification(v) for k, v in items.items()}) return d @@ -646,6 +651,7 @@ def open_ufo(path): return ufoLib2.Font.open(path) return False + # https://github.com/googlefonts/nanoemoji/blob/fb4b0b3e10f7197e7fe33c4ae6949841e4440397/src/nanoemoji/util.py#L167-L176 def shell_quote(s: Union[str, Path]) -> str: """Quote a string or pathlib.Path for use in a shell command.""" @@ -660,13 +666,13 @@ def shell_quote(s: Union[str, Path]) -> str: def github_user_repo(github_url): - pattern = r'https?://w?w?w?\.?github\.com/(?P<user>[^/]+)/(?P<repo>[^/^.]+)' + pattern = r"https?://w?w?w?\.?github\.com/(?P<user>[^/]+)/(?P<repo>[^/^.]+)" match = re.search(pattern, github_url) if not match: raise ValueError( f"Cannot extract github user and repo name from url '{github_url}'." 
) - return match.group('user'), match.group('repo') + return match.group("user"), match.group("repo") def has_gh_token(): diff --git a/tests/push/test_items.py b/tests/push/test_items.py index 77581585..323277b4 100644 --- a/tests/push/test_items.py +++ b/tests/push/test_items.py @@ -17,108 +17,82 @@ FONTS_JSON = json.load(open(os.path.join(SERVER_DIR, "fonts.json"), encoding="utf8")) - @pytest.mark.parametrize( "type_, fp, gf_data, res", [ ( Family, TEST_FAMILY_DIR, - next(f for f in FONTS_JSON["familyMetadataList"] if f["family"] == "Maven Pro"), + next( + f + for f in FONTS_JSON["familyMetadataList"] + if f["family"] == "Maven Pro" + ), Family( name="Maven Pro", version="Version 2.102", - ) + ), ), ( FamilyMeta, TEST_FAMILY_DIR, FAMILY_JSON, FamilyMeta( - name='Maven Pro', - designer=['Joe Prince'], - license='ofl', - category='SANS_SERIF', - subsets=['latin', 'latin-ext', 'vietnamese'], - stroke='SANS_SERIF', + name="Maven Pro", + designer=["Joe Prince"], + license="ofl", + category="SANS_SERIF", + subsets=["latin", "latin-ext", "vietnamese"], + stroke="SANS_SERIF", classifications=[], - description='Maven Pro is a sans-serif typeface with unique ' - 'curvature and flowing rhythm. Its forms make it very ' - 'distinguishable and legible when in context. It blends ' - 'styles of many great typefaces and is suitable for any ' - 'design medium. Maven Pro’s modern design is great for ' - 'the web and fits in any environment. Updated in ' - 'January 2019 with a Variable Font "Weight" axis. The ' - 'Maven Pro project was initiated by Joe Price, a type ' - 'designer based in the USA. To contribute, see ' - 'github.com/googlefonts/mavenproFont', + description="Maven Pro is a sans-serif typeface with unique " + "curvature and flowing rhythm. Its forms make it very " + "distinguishable and legible when in context. It blends " + "styles of many great typefaces and is suitable for any " + "design medium. Maven Pro’s modern design is great for " + "the web and fits in any environment. Updated in " + 'January 2019 with a Variable Font "Weight" axis. The ' + "Maven Pro project was initiated by Joe Price, a type " + "designer based in the USA. To contribute, see " + "github.com/googlefonts/mavenproFont", primary_script=None, article=None, - minisite_url=None - ) + minisite_url=None, + ), ), ( Designer, DESIGNER_DIR, FAMILY_JSON["designers"][0], - Designer( - name="Joe Prince", - bio=None - ) + Designer(name="Joe Prince", bio=None), ), ( Axis, AXES_DIR / "weight.textproto", next(a for a in FONTS_JSON["axisRegistry"] if a["tag"] == "wght"), Axis( - tag='wght', - display_name='Weight', + tag="wght", + display_name="Weight", min_value=1.0, default_value=400.0, max_value=1000.0, precision=0, fallback=[ - AxisFallback( - name='Thin', - value=100.0 - ), - AxisFallback( - name='ExtraLight', - value=200.0 - ), - AxisFallback( - name='Light', - value=300.0 - ), - AxisFallback( - name='Regular', - value=400.0 - ), - AxisFallback( - name='Medium', - value=500.0 - ), - AxisFallback( - name='SemiBold', - value=600.0 - ), - AxisFallback( - name='Bold', - value=700. 
-                    ),
-                    AxisFallback(
-                        name='ExtraBold',
-                        value=800.0
-                    ),
-                    AxisFallback(
-                        name='Black',
-                        value=900.0
-                    )
+                    AxisFallback(name="Thin", value=100.0),
+                    AxisFallback(name="ExtraLight", value=200.0),
+                    AxisFallback(name="Light", value=300.0),
+                    AxisFallback(name="Regular", value=400.0),
+                    AxisFallback(name="Medium", value=500.0),
+                    AxisFallback(name="SemiBold", value=600.0),
+                    AxisFallback(name="Bold", value=700.0),
+                    AxisFallback(name="ExtraBold", value=800.0),
+                    AxisFallback(name="Black", value=900.0),
                 ],
                 fallback_only=False,
-                description='Adjust the style from lighter to bolder in typographic color, by varying stroke weights, spacing and kerning, and other aspects of the type. This typically changes overall width, and so may be used in conjunction with Width and Grade axes.')
-        )
-    ]
+                description="Adjust the style from lighter to bolder in typographic color, by varying stroke weights, spacing and kerning, and other aspects of the type. This typically changes overall width, and so may be used in conjunction with Width and Grade axes.",
+            ),
+        ),
+    ],
 )
 def test_item_from_fp_and_gf_data(type_, fp, gf_data, res):
     assert type_.from_fp(fp) == type_.from_gf_json(gf_data) == res
diff --git a/tests/push/test_servers.py b/tests/push/test_servers.py
index 44b32893..248cb748 100644
--- a/tests/push/test_servers.py
+++ b/tests/push/test_servers.py
@@ -7,7 +7,7 @@
     "dev": {"families": {"Abel": {"name": "Abel", "version": "1.000"}}},
     "sandbox": {"families": {"Abel": {"name": "Abel", "version": "0.999"}}},
     "production": {"families": {"Abel": {"name": "Abel", "version": "0.999"}}},
-    "last_checked": "2023-01-01"
+    "last_checked": "2023-01-01",
 }
 
 
@@ -29,16 +29,16 @@ def test_iter(servers):
     "item, res",
     [
         (
-            Family("Abel", "1.000"), 
+            Family("Abel", "1.000"),
             {
                 "name": "Abel",
                 "version": "1.000",
                 "In dev": True,
                 "In sandbox": False,
-                "In production": False
-            }
+                "In production": False,
+            },
         )
-    ]
+    ],
 )
 def test_compare_items(servers, item, res):
     # TODO may be worth using a dataclass instead of dict
@@ -56,13 +56,25 @@ def server():
         # Test on a family which isn't updated regularly. We should
         # probably use mocks at some point
         ("update_family", "Allan", Family("Allan", "Version 1.002")),
-        ("update_family_designers", "Allan", Designer(name='Anton Koovit', bio=None)),
-        ("update_metadata", "Allan", FamilyMeta(name='Allan', designer=['Anton Koovit'], license='ofl', category='DISPLAY', subsets=['latin', 'latin-ext'], stroke='SERIF', classifications=['display'], description='Once Allan was a sign painter in Berlin. Grey paneling work in the subway, bad materials, a city split in two. Now things have changed. His (character) palette of activities have expanded tremendously: he happily spends time traveling, experimenting in the gastronomic field, all kinds of festivities are no longer foreign to him. He comes with alternate features, and hints. A typeface suited for bigger sizes and display use. Truly a type that you like to see!'))
-    ]
+        ("update_family_designers", "Allan", Designer(name="Anton Koovit", bio=None)),
+        (
+            "update_metadata",
+            "Allan",
+            FamilyMeta(
+                name="Allan",
+                designer=["Anton Koovit"],
+                license="ofl",
+                category="DISPLAY",
+                subsets=["latin", "latin-ext"],
+                stroke="SERIF",
+                classifications=["display"],
+                description="Once Allan was a sign painter in Berlin. Grey paneling work in the subway, bad materials, a city split in two. Now things have changed. His (character) palette of activities have expanded tremendously: he happily spends time traveling, experimenting in the gastronomic field, all kinds of festivities are no longer foreign to him. He comes with alternate features, and hints. A typeface suited for bigger sizes and display use. Truly a type that you like to see!",
+            ),
+        ),
+    ],
 )
 def test_update_server(server, method, family_name, res):
     assert server.find_item(res) == None
     funcc = getattr(server, method)
     funcc(family_name)
     assert server.find_item(res) == res
-
diff --git a/tests/push/test_trafficjam.py b/tests/push/test_trafficjam.py
index eb9a425c..40fd4ee1 100644
--- a/tests/push/test_trafficjam.py
+++ b/tests/push/test_trafficjam.py
@@ -1,6 +1,12 @@
 import pytest
 import operator
-from gftools.push.trafficjam import PushItem, PushItems, PushCategory, PushStatus, PushList
+from gftools.push.trafficjam import (
+    PushItem,
+    PushItems,
+    PushCategory,
+    PushStatus,
+    PushList,
+)
 from pathlib import Path
 import os
 
diff --git a/tests/test_builder.py b/tests/test_builder.py
index 231e4f5d..de204c54 100644
--- a/tests/test_builder.py
+++ b/tests/test_builder.py
@@ -30,7 +30,7 @@
                 os.path.join("webfonts", "TestFamily-Black.woff2"),
                 os.path.join("webfonts", "TestFamily-Regular.woff2"),
                 os.path.join("webfonts", "TestFamily-Thin.woff2"),
-            ]
+            ],
         ),
         # Family consists of ufos which are not MM compatible. Tests
         # https://github.com/googlefonts/gftools/pull/669
@@ -50,9 +50,11 @@
             os.path.join(TEST_DIR, "recipeprovider_noto"),
             [
                 os.path.join("TestFamily", "unhinted", "ttf", "TestFamily-Regular.ttf"),
-                os.path.join("TestFamily", "googlefonts", "ttf", "TestFamily-Black.ttf"),
+                os.path.join(
+                    "TestFamily", "googlefonts", "ttf", "TestFamily-Black.ttf"
+                ),
             ],
-        )
+        ),
     ],
 )
 def test_builder(fp, font_paths):
@@ -128,8 +130,6 @@ def test_builder_glyphData(fp, font_paths):
 
 
 def test_bad_configs():
-    config = {
-        "Sources": ["foo.glyphs"]
-    }
+    config = {"Sources": ["foo.glyphs"]}
     with pytest.raises(ValueError):
         GFBuilder(config)
diff --git a/tests/test_fix.py b/tests/test_fix.py
index 099fd925..bbffd84d 100644
--- a/tests/test_fix.py
+++ b/tests/test_fix.py
@@ -23,7 +23,7 @@ def var_font():
 def var_fonts():
     paths = [
         os.path.join(TEST_DATA, "Raleway[wght].ttf"),
-        os.path.join(TEST_DATA, "Raleway-Italic[wght].ttf")
+        os.path.join(TEST_DATA, "Raleway-Italic[wght].ttf"),
     ]
     return [TTFont(p) for p in paths]
 
@@ -59,7 +59,7 @@ def test_add_dummy_dsig(static_font):
 def test_fix_hinted_font(static_font):
     static_font["head"].flags &= ~(1 << 3)
     assert static_font["head"].flags & (1 << 3) != (1 << 3)
-    static_font['fpgm'] = newTable("fpgm")
+    static_font["fpgm"] = newTable("fpgm")
     fix_hinted_font(static_font)
     assert static_font["head"].flags & (1 << 3) == (1 << 3)
 
@@ -111,10 +111,8 @@ def test_fix_fs_type(static_font):
     ("12pt Italic", 400, (1 << 0), (1 << 1)),
 ]
 
-@pytest.mark.parametrize(
-    STYLE_HEADERS,
-    STYLE_TABLE
-)
+
+@pytest.mark.parametrize(STYLE_HEADERS, STYLE_TABLE)
 def test_fix_weight_class(static_font, style, weight_class, fs_selection, mac_style):
     name = static_font["name"]
     name.setName(style, 2, 3, 1, 0x409)
@@ -133,10 +131,7 @@ def test_unknown_weight_class(static_font):
     fix_weight_class(static_font)
 
 
-@pytest.mark.parametrize(
-    STYLE_HEADERS,
-    STYLE_TABLE
-)
+@pytest.mark.parametrize(STYLE_HEADERS, STYLE_TABLE)
 def test_fs_selection(static_font, style, weight_class, fs_selection, mac_style):
     # disable fsSelection bits above 6
     for i in range(7, 12):
@@ -148,10 +143,7 @@ def test_fs_selection(static_font, style, weight_class, fs_selection, mac_style)
     assert static_font["OS/2"].fsSelection == fs_selection
 
 
-@pytest.mark.parametrize(
-    STYLE_HEADERS,
-    STYLE_TABLE
-)
+@pytest.mark.parametrize(STYLE_HEADERS, STYLE_TABLE)
 def test_fix_mac_style(static_font, style, weight_class, fs_selection, mac_style):
     name = static_font["name"]
     name.setName(style, 2, 3, 1, 0x409)
@@ -244,12 +236,13 @@ def test_fix_vertical_metrics_typo_metrics_enabled(static_fonts):
     [
         (os.path.join(TEST_DATA, "CairoPlay[slnt,wght]-no-empty-glyphs.ttf")),
         (os.path.join(TEST_DATA, "CairoPlay[slnt,wght]-gid1-not-empty.ttf")),
-    ]
+    ],
 )
 def test_fix_colr_v0_font(font_path):
     # Fix a COLR v0 font.
     # maximum_color should not be run and GID 1 should have a blank glyph
     from gftools.fix import fix_colr_font
+
     font = TTFont(font_path)
 
     gid1 = font.getGlyphOrder()[1]
@@ -281,7 +274,10 @@ def test_ofl_license_strings(static_font):
     from gftools.constants import OFL_LICENSE_INFO, OFL_LICENSE_URL
 
     for id in (13, 14):
-        assert "http://scripts.sil.org/OFL" in static_font["name"].getName(id, 3, 1, 0x409).toUnicode()
+        assert (
+            "http://scripts.sil.org/OFL"
+            in static_font["name"].getName(id, 3, 1, 0x409).toUnicode()
+        )
     fix_license_strings(static_font)
     for id, expected in ((13, OFL_LICENSE_INFO), (14, OFL_LICENSE_URL)):
-        assert expected == static_font["name"].getName(id, 3, 1, 0x409).toUnicode()
\ No newline at end of file
+        assert expected == static_font["name"].getName(id, 3, 1, 0x409).toUnicode()
diff --git a/tests/test_gfgithub.py b/tests/test_gfgithub.py
index 661af206..729e8b76 100644
--- a/tests/test_gfgithub.py
+++ b/tests/test_gfgithub.py
@@ -9,7 +9,7 @@
         (6779, 3),
         (2987, 178),
         (6787, 568),
-    ]
+    ],
 )
 def test_pr_files(pr_number, file_count):
     client = GitHubClient("google", "fonts")
diff --git a/tests/test_instancer.py b/tests/test_instancer.py
index 6ca634b1..3447001f 100644
--- a/tests/test_instancer.py
+++ b/tests/test_instancer.py
@@ -13,7 +13,7 @@ def var_font():
 
 
 def _name_record(ttFont, nameID):
-    nametable = ttFont['name']
+    nametable = ttFont["name"]
     record = nametable.getName(nameID, 3, 1, 0x409)
     if record:
         return record.toUnicode()
@@ -27,10 +27,10 @@ def test_gen_static_font(var_font):
 
     assert _name_record(static_font, 16) == "Inconsolata Condensed"
     assert _name_record(static_font, 17) == "SemiBold"
-    assert static_font['OS/2'].usWeightClass == 600
-    assert static_font['OS/2'].usWidthClass == 5
-    assert static_font['OS/2'].fsSelection & (1 << 6)
-    assert static_font['head'].macStyle == 0
+    assert static_font["OS/2"].usWeightClass == 600
+    assert static_font["OS/2"].usWidthClass == 5
+    assert static_font["OS/2"].fsSelection & (1 << 6)
+    assert static_font["head"].macStyle == 0
 
 
 def test_gen_static_font_custom_names(var_font):
@@ -42,7 +42,9 @@ def test_gen_static_font_custom_names(var_font):
 
 
 def test_gen_static_font_custom_names_without_declaring_wght(var_font):
-    static_font = gen_static_font(var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed")
+    static_font = gen_static_font(
+        var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed"
+    )
     assert _name_record(static_font, 1) == "Custom Family 8pt SemiCondensed"
     assert _name_record(static_font, 2) == "Regular"
     assert _name_record(static_font, 16) == None
@@ -50,7 +52,9 @@ def test_gen_static_font_custom_names_without_declaring_wght(var_font):
 
 
 def test_gen_static_font_custom_names_ribbi(var_font):
-    static_font = gen_static_font(var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed Bold Italic")
+    static_font = gen_static_font(
+        var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed Bold Italic"
+    )
     assert _name_record(static_font, 1) == "Custom Family 8pt SemiCondensed"
     assert _name_record(static_font, 2) == "Bold Italic"
     assert _name_record(static_font, 16) == None
@@ -58,8 +62,10 @@ def test_gen_static_font_custom_names_ribbi(var_font):
 
 
 def test_gen_static_font_custom_names_non_ribbi(var_font):
-    static_font = gen_static_font(var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed Medium")
+    static_font = gen_static_font(
+        var_font, {"wght": 900}, "Custom Family", "8pt SemiCondensed Medium"
+    )
     assert _name_record(static_font, 1) == "Custom Family 8pt SemiCondensed Medium"
     assert _name_record(static_font, 2) == "Regular"
     assert _name_record(static_font, 16) == "Custom Family 8pt SemiCondensed"
-    assert _name_record(static_font, 17) == "Medium"
\ No newline at end of file
+    assert _name_record(static_font, 17) == "Medium"
diff --git a/tests/test_usage.py b/tests/test_usage.py
index 4df50e4b..a6dc377c 100644
--- a/tests/test_usage.py
+++ b/tests/test_usage.py
@@ -25,20 +25,26 @@
 CWD = os.path.dirname(__file__)
 TEST_DIR = os.path.join(CWD, "..", "data", "test")
 
+
 class TestGFToolsScripts(unittest.TestCase):
     """Functional tests to determine whether each script can execute successfully"""
+
     def setUp(self):
-        self.example_dir = os.path.join(TEST_DIR, 'cabin')
-        self.example_font = os.path.join(self.example_dir, 'Cabin-Regular.ttf')
+        self.example_dir = os.path.join(TEST_DIR, "cabin")
+        self.example_font = os.path.join(self.example_dir, "Cabin-Regular.ttf")
         self.example_family = glob(os.path.join(TEST_DIR, "mavenpro", "*.ttf"))
-        self.example_vf_font = os.path.join(TEST_DIR, 'Lora-Roman-VF.ttf')
-        self.example_vf_stat = os.path.join(TEST_DIR, 'lora_stat.yaml')
-        self.example_glyphs_file = os.path.join(TEST_DIR, 'Lora.glyphs')
-        self.example_builder_config = os.path.join(TEST_DIR, 'builder_test.yaml')
-        self.example_builder_config_2_sources = os.path.join(TEST_DIR, "Libre-Bodoni", "sources", "config.yaml")
+        self.example_vf_font = os.path.join(TEST_DIR, "Lora-Roman-VF.ttf")
+        self.example_vf_stat = os.path.join(TEST_DIR, "lora_stat.yaml")
+        self.example_glyphs_file = os.path.join(TEST_DIR, "Lora.glyphs")
+        self.example_builder_config = os.path.join(TEST_DIR, "builder_test.yaml")
+        self.example_builder_config_2_sources = os.path.join(
+            TEST_DIR, "Libre-Bodoni", "sources", "config.yaml"
+        )
         self.src_vtt_font = os.path.join(TEST_DIR, "Inconsolata[wdth,wght].ttf")
-        self.gf_family_dir = os.path.join('data', 'test', 'mock_googlefonts', 'ofl', 'abel')
-        self.nam_file = os.path.join('data', 'test', 'arabic_unique-glyphs.nam')
+        self.gf_family_dir = os.path.join(
+            "data", "test", "mock_googlefonts", "ofl", "abel"
+        )
+        self.nam_file = os.path.join("data", "test", "arabic_unique-glyphs.nam")
         self.dir_before_tests = os.listdir(self.example_dir)
 
     def tearDown(self):
@@ -49,124 +55,154 @@ def tearDown(self):
 
     def test_add_font(self):
         from gftools.scripts.add_font import main
+
         main([self.gf_family_dir])
 
     def test_build_ofl(self):
         from gftools.scripts.build_ofl import main
         from tempfile import TemporaryDirectory
+
         with TemporaryDirectory() as tmp_dir:
             main([self.example_font, tmp_dir])
 
     def test_check_bbox(self):
         from gftools.scripts.check_bbox import main
-        main([self.example_font, '--glyphs', '--extremes'])
+
+        main([self.example_font, "--glyphs", "--extremes"])
 
     def test_check_copyright_notices(self):
         from gftools.scripts.check_copyright_notices import main
+
         main([self.example_font])
 
     def test_check_font_version(self):
         from gftools.scripts.check_font_version import main
+
         main(["Cabin"])
 
     def test_check_name(self):
         from gftools.scripts.check_name import main
+
         main([self.example_font])
 
     def test_check_vtt_compatibility(self):
         from gftools.scripts.check_vtt_compatibility import main
+
         main([self.example_font, self.example_font])
 
     def test_compare_font(self):
         from gftools.scripts.compare_font import main
+
         main([self.example_font, self.example_font])
 
     def test_find_features(self):
         from gftools.scripts.find_features import main
+
         main([self.example_font])
 
     def test_fix_ascii_fontmetadata(self):
         from gftools.scripts.fix_ascii_fontmetadata import main
+
         main([self.example_font])
 
     def test_fix_cmap(self):
         from gftools.scripts.fix_cmap import main
+
         main([self.example_font])
 
     def test_fix_familymetadata(self):
         from gftools.scripts.fix_familymetadata import main
+
         main([self.example_font])
 
     def test_fix_fsselection(self):
         from gftools.scripts.fix_fsselection import main
+
         main([self.example_font])
 
     def test_fix_fstype(self):
         from gftools.scripts.fix_fstype import main
+
         main([self.example_font])
 
     def test_fix_gasp(self):
         from gftools.scripts.fix_gasp import main
+
         main([self.example_font])
 
     def test_fix_glyph_private_encoding(self):
         from gftools.scripts.fix_glyph_private_encoding import main
+
         main([self.example_font])
 
     def test_fix_glyphs(self):
         from gftools.scripts.fix_glyphs import main
+
         main([self.example_glyphs_file])
 
     def test_fix_hinting(self):
         from gftools.scripts.fix_hinting import main
+
         main([self.example_font])
 
     def test_fix_isfixedpitch(self):
         from gftools.scripts.fix_isfixedpitch import main
+
         main(["--fonts", self.example_font])
 
     def test_fix_nameids(self):
         from gftools.scripts.fix_nameids import main
+
         main([self.example_font])
 
     def test_fix_nonhinting(self):
         from gftools.scripts.fix_nonhinting import main
-        main([self.example_font, self.example_font + '.fix'])
+
+        main([self.example_font, self.example_font + ".fix"])
 
     def test_fix_ttfautohint(self):
         from gftools.scripts.fix_ttfautohint import main
+
         main([self.example_font])
 
     def test_fix_vendorid(self):
         from gftools.scripts.fix_vendorid import main
+
         main([self.example_font])
 
     def test_fix_vertical_metrics(self):
         from gftools.scripts.fix_vertical_metrics import main
+
         main([self.example_font])
 
     def test_font_diff(self):
         from gftools.scripts.font_diff import main
+
         main([self.example_font, self.example_font])
 
     def test_font_weights_coverage(self):
         from gftools.scripts.font_weights_coverage import main
+
         main([self.example_dir])
 
     def test_fix_font(self):
         from gftools.scripts.fix_font import main
+
         main([self.example_font])
 
     def test_fix_family(self):
         from gftools.scripts.fix_family import main
+
         main(self.example_family)
 
     def test_list_italicangle(self):
         from gftools.scripts.list_italicangle import main
+
         main([self.example_font])
 
     def test_list_panose(self):
         from gftools.scripts.list_panose import main
+
         main([self.example_font])
 
     # def test_list_variable_source(self):
@@ -174,48 +210,60 @@ def test_list_panose(self):
 
     def test_list_weightclass(self):
         from gftools.scripts.list_weightclass import main
+
         main([self.example_font])
 
     def test_list_widthclass(self):
         from gftools.scripts.list_widthclass import main
+
         main([self.example_font])
 
     def test_nametable_from_filename(self):
         from gftools.scripts.nametable_from_filename import main
+
         main([self.example_font])
 
     def test_ots(self):
         from gftools.scripts.ots import main
+
         main([self.example_dir])
 
     def test_rangify(self):
         from gftools.scripts.rangify import main
+
         main([self.nam_file])
 
     def test_ttf2cp(self):
         from gftools.scripts.ttf2cp import main
+
         main([self.example_font])
 
     def test_unicode_names(self):
         from gftools.scripts.unicode_names import main
+
         main(["--nam_file", self.nam_file])
 
     def test_update_families(self):
         from gftools.scripts.update_families import main
+
         main([self.example_font])
 
     def test_update_version(self):
         from gftools.scripts.update_version import main
+
         main(["--old_version", "2.00099", "--new_version", "2.001", self.example_font])
 
     def test_varfont_info(self):
         from gftools.scripts.varfont_info import main
+
         main([self.example_vf_font])
 
     def test_what_subsets(self):
         from gftools.scripts.what_subsets import main
+
         main([self.example_font])
 
+
     # def test_rename_font(self):
     #     from gftools.scripts.rename-font'), self.example_font, "Foobar"])
     #     # Temporarily disabling this until we close issue #13
@@ -242,10 +290,10 @@ def test_what_subsets(self):
 
     # def test_builder(self):
     #     from gftools.scripts.builder'), self.example_builder_config])
-    
+
     # def test_builder_2_sources(self):
     #     self.check_script(["python", self.get_path("builder"), self.example_builder_config_2_sources])
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_utils.py b/tests/test_utils.py
index c7e7385b..80c417aa 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -11,10 +11,11 @@
         ("http://google.com", "google.com"),
         ("google.com", "google.com"),
         ("", ""),
-    ]
+    ],
 )
 def test_remove_url_prefix(url, want):
     from gftools.utils import remove_url_prefix
+
     got = remove_url_prefix(url)
     assert got == want
 
@@ -62,16 +63,26 @@ def test_format_html():
 @pytest.mark.parametrize(
     """url,want""",
     [
-        ("https://github.com/SorkinType/SASchoolHandAustralia", ("SorkinType", "SASchoolHandAustralia")),
-        ("https://github.com/SorkinType/SASchoolHandAustralia/", ("SorkinType", "SASchoolHandAustralia")),
+        (
+            "https://github.com/SorkinType/SASchoolHandAustralia",
+            ("SorkinType", "SASchoolHandAustralia"),
+        ),
+        (
+            "https://github.com/SorkinType/SASchoolHandAustralia/",
+            ("SorkinType", "SASchoolHandAustralia"),
+        ),
         ("https://github.com/googlefonts/MavenPro//", ("googlefonts", "MavenPro")),
         ("https://github.com/googlefonts/MavenPro.git", ("googlefonts", "MavenPro")),
-        ("https://www.github.com/googlefonts/MavenPro.git", ("googlefonts", "MavenPro")),
+        (
+            "https://www.github.com/googlefonts/MavenPro.git",
+            ("googlefonts", "MavenPro"),
+        ),
         ("http://www.github.com/googlefonts/MavenPro.git", ("googlefonts", "MavenPro")),
-    ]
+    ],
 )
 def test_github_user_repo(url, want):
     from gftools.utils import github_user_repo
+
     assert github_user_repo(url) == want