#!/usr/bin/env python
"""Sanity checks for the fonts installed on an Android system image.

Validates the font fallback configuration (fonts.xml), checks that every
script with automatic hyphenation has a font covering hyphen characters,
and optionally verifies that the emoji font covers all expected emoji
sequences and maps equivalent sequences to the same glyph.

Usage:
    fontchain_lint.py TARGET_OUT check_emoji [UCD_PATH]

where check_emoji is 'true' or 'false', and UCD_PATH points at a directory
of Unicode data files (emoji-data.txt, DerivedAge.txt, etc.).
"""

import collections
import copy
import glob
import itertools
from os import path
import sys
from xml.etree import ElementTree

from fontTools import ttLib

# Variation selector requesting emoji (color) presentation.
EMOJI_VS = 0xFE0F

# Languages that appear in hyphenation pattern file names, mapped to the
# script their text is written in.
LANG_TO_SCRIPT = {
    'as': 'Beng',
    'bn': 'Beng',
    'cy': 'Latn',
    'da': 'Latn',
    'de': 'Latn',
    'en': 'Latn',
    'es': 'Latn',
    'et': 'Latn',
    'eu': 'Latn',
    'fr': 'Latn',
    'ga': 'Latn',
    'gu': 'Gujr',
    'hi': 'Deva',
    'hr': 'Latn',
    'hu': 'Latn',
    'hy': 'Armn',
    'ja': 'Jpan',
    'kn': 'Knda',
    'ko': 'Kore',
    'ml': 'Mlym',
    'mn': 'Cyrl',
    'mr': 'Deva',
    'nb': 'Latn',
    'nn': 'Latn',
    'or': 'Orya',
    'pa': 'Guru',
    'pt': 'Latn',
    'sl': 'Latn',
    'ta': 'Taml',
    'te': 'Telu',
    'tk': 'Latn',
}


def lang_to_script(lang_code):
    """Resolve a (possibly hyphenated) BCP-47-like code to a script name.

    Strips subtags from the right (e.g. 'en-us' -> 'en') until either the
    remaining language is found in LANG_TO_SCRIPT, or a four-letter alpha
    subtag is encountered, which is assumed to be an explicit script.
    Asserts if the script cannot be determined.
    """
    lang = lang_code.lower()
    while lang not in LANG_TO_SCRIPT:
        hyphen_idx = lang.rfind('-')
        assert hyphen_idx != -1, (
            'We do not know what script the "%s" language is written in.'
            % lang_code)
        assumed_script = lang[hyphen_idx+1:]
        if len(assumed_script) == 4 and assumed_script.isalpha():
            # This is actually the script
            return assumed_script.title()
        lang = lang[:hyphen_idx]
    return LANG_TO_SCRIPT[lang]


def printable(inp):
    """Format a character, character sequence, or set of sequences for
    human-readable error messages (e.g. '<U+1F1FA, U+1F1F8>')."""
    if isinstance(inp, set):  # set of character sequences
        return '{' + ', '.join([printable(seq) for seq in inp]) + '}'
    if isinstance(inp, tuple):  # character sequence
        return '<' + (', '.join([printable(ch) for ch in inp])) + '>'
    else:  # single character
        return 'U+%04X' % inp


def open_font(font):
    """Open a (file_name, collection_index) pair as a fontTools TTFont.

    The index is None for plain font files, or an integer for members of
    a TTC collection. Files are located relative to the global _fonts_dir.
    """
    font_file, index = font
    font_path = path.join(_fonts_dir, font_file)
    if index is not None:
        return ttLib.TTFont(font_path, fontNumber=index)
    else:
        return ttLib.TTFont(font_path)


def get_best_cmap(font):
    """Return the best available codepoint->glyph cmap of a font.

    Prefers the UCS-4 (format 12) cmap; falls back to the BMP (format 4)
    cmap. Asserts if a font carries more than one of either kind.
    """
    ttfont = open_font(font)
    all_unicode_cmap = None
    bmp_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (4, 3, 1):
            assert bmp_cmap is None, 'More than one BMP cmap in %s' % (font, )
            bmp_cmap = cmap
        elif specifier == (12, 3, 10):
            assert all_unicode_cmap is None, (
                'More than one UCS-4 cmap in %s' % (font, ))
            all_unicode_cmap = cmap

    return all_unicode_cmap.cmap if all_unicode_cmap else bmp_cmap.cmap


def get_variation_sequences_cmap(font):
    """Return the variation-sequence (format 14) cmap subtable of a font,
    or None if the font has none. Asserts on duplicates."""
    ttfont = open_font(font)
    vs_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (14, 0, 5):
            assert vs_cmap is None, 'More than one VS cmap in %s' % (font, )
            vs_cmap = cmap
    return vs_cmap


def get_emoji_map(font):
    """Build a map from supported emoji inputs to glyph names.

    Keys are single codepoints, (base, variation_selector) pairs, and
    codepoint tuples for ligature (e.g. ZWJ and flag) sequences pulled
    from the font's GSUB type-4 lookups. Asserts if any proper prefix of
    a ligature sequence is itself mapped, since such a sequence could
    never be reached by a longest-match shaper.
    """
    # Add normal characters
    emoji_map = copy.copy(get_best_cmap(font))
    reverse_cmap = {glyph: code for code, glyph in emoji_map.items()}

    # Add variation sequences
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for vs in vs_dict:
        for base, glyph in vs_dict[vs]:
            if glyph is None:
                # A None glyph means "use the default glyph for the base".
                emoji_map[(base, vs)] = emoji_map[base]
            else:
                emoji_map[(base, vs)] = glyph

    # Add GSUB rules
    ttfont = open_font(font)
    for lookup in ttfont['GSUB'].table.LookupList.Lookup:
        assert lookup.LookupType == 4, 'We only understand type 4 lookups'
        for subtable in lookup.SubTable:
            ligatures = subtable.ligatures
            for first_glyph in ligatures:
                for ligature in ligatures[first_glyph]:
                    sequence = [first_glyph] + ligature.Component
                    sequence = [reverse_cmap[glyph] for glyph in sequence]
                    sequence = tuple(sequence)
                    # Make sure no starting subsequence of 'sequence' has been
                    # seen before.
                    for sub_len in range(2, len(sequence)+1):
                        subsequence = sequence[:sub_len]
                        assert subsequence not in emoji_map
                    emoji_map[sequence] = ligature.LigGlyph

    return emoji_map


def assert_font_supports_any_of_chars(font, chars):
    """Exit with an error unless the font supports at least one of chars."""
    best_cmap = get_best_cmap(font)
    for char in chars:
        if char in best_cmap:
            return
    sys.exit('None of characters in %s were found in %s' % (chars, font))


def assert_font_supports_all_of_chars(font, chars):
    """Assert that every codepoint in chars is in the font's cmap."""
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char in best_cmap, (
            'U+%04X was not found in %s' % (char, font))


def assert_font_supports_none_of_chars(font, chars):
    """Assert that no codepoint in chars is in the font's cmap."""
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char not in best_cmap, (
            'U+%04X was found in %s' % (char, font))


def assert_font_supports_all_sequences(font, sequences):
    """Assert that every (base, variation_selector) pair in sequences is
    present in the font's variation-sequence cmap with a default glyph."""
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for base, vs in sorted(sequences):
        assert vs in vs_dict and (base, None) in vs_dict[vs], (
            '<U+%04X, U+%04X> was not found in %s' % (base, vs, font))


def check_hyphens(hyphens_dir):
    """Check that every script with a hyphenation pattern file has at
    least one font supporting HYPHEN-MINUS or HYPHEN."""
    # Find all the scripts that need automatic hyphenation
    scripts = set()
    for hyb_file in glob.iglob(path.join(hyphens_dir, '*.hyb')):
        hyb_file = path.basename(hyb_file)
        assert hyb_file.startswith('hyph-'), (
            'Unknown hyphenation file %s' % hyb_file)
        lang_code = hyb_file[hyb_file.index('-')+1:hyb_file.index('.')]
        scripts.add(lang_to_script(lang_code))

    HYPHENS = {0x002D, 0x2010}
    for script in scripts:
        fonts = _script_to_font_map[script]
        assert fonts, 'No fonts found for the "%s" script' % script
        for font in fonts:
            assert_font_supports_any_of_chars(font, HYPHENS)


class FontRecord(object):
    """One <font> entry from fonts.xml.

    Attributes:
        name: family name for named (LGC) families, or None.
        scripts: frozenset of script codes derived from the family's langs.
        variant: None, 'elegant', or 'compact'.
        weight: integer CSS-style weight (multiple of 100).
        style: 'normal' or 'italic'.
        font: (file_name, collection_index) pair.
    """

    def __init__(self, name, scripts, variant, weight, style, font):
        self.name = name
        self.scripts = scripts
        self.variant = variant
        self.weight = weight
        self.style = style
        self.font = font


def parse_fonts_xml(fonts_xml_path):
    """Parse fonts.xml, populating the module-level fallback chain and
    script->fonts map, and validating structural invariants along the way.

    Sets the globals _script_to_font_map (script code -> set of
    (file, index) pairs) and _fallback_chain (list of FontRecord in
    fallback priority order).
    """
    global _script_to_font_map, _fallback_chain
    _script_to_font_map = collections.defaultdict(set)
    _fallback_chain = []
    tree = ElementTree.parse(fonts_xml_path)
    for family in tree.findall('family'):
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')
        if name:
            assert variant is None, (
                'No variant expected for LGC font %s.' % name)
            assert langs is None, (
                'No language expected for LGC fonts %s.' % name)
        else:
            assert variant in {None, 'elegant', 'compact'}, (
                'Unexpected value for variant: %s' % variant)

        if langs:
            langs = langs.split()
            scripts = {lang_to_script(lang) for lang in langs}
        else:
            scripts = set()

        for child in family:
            assert child.tag == 'font', (
                'Unknown tag <%s>' % child.tag)
            font_file = child.text
            weight = int(child.get('weight'))
            assert weight % 100 == 0, (
                'Font weight "%d" is not a multiple of 100.' % weight)

            style = child.get('style')
            assert style in {'normal', 'italic'}, (
                'Unknown style "%s"' % style)

            index = child.get('index')
            if index:
                index = int(index)

            _fallback_chain.append(FontRecord(
                name,
                frozenset(scripts),
                variant,
                weight,
                style,
                (font_file, index)))

            if name:  # non-empty names are used for default LGC fonts
                map_scripts = {'Latn', 'Grek', 'Cyrl'}
            else:
                map_scripts = scripts
            for script in map_scripts:
                _script_to_font_map[script].add((font_file, index))


def check_emoji_coverage(all_emoji, equivalent_emoji):
    """Check the system emoji font against the expected emoji sets."""
    emoji_font = get_emoji_font()
    check_emoji_font_coverage(emoji_font, all_emoji, equivalent_emoji)


def get_emoji_font():
    """Return the single font in the fallback chain tagged with the
    'Zsye' (emoji) script. Asserts if there is not exactly one."""
    emoji_fonts = [
        record.font for record in _fallback_chain
        if 'Zsye' in record.scripts]
    assert len(emoji_fonts) == 1, 'There are %d emoji fonts.' % len(emoji_fonts)
    return emoji_fonts[0]


def check_emoji_font_coverage(emoji_font, all_emoji, equivalent_emoji):
    """Assert that the emoji font covers all expected sequences and that
    equivalent sequences render with the same glyph."""
    coverage = get_emoji_map(emoji_font)
    for sequence in all_emoji:
        assert sequence in coverage, (
            '%s is not supported in the emoji font.' % printable(sequence))

    # disable temporarily - we cover more than this
    """
    for sequence in coverage:
        if sequence in {0x0000, 0x000D, 0x0020}:
            # The font needs to support a few extra characters, which is OK
            continue
        assert sequence in all_emoji, (
            'Emoji font should not support %s.' % printable(sequence))
    """

    for first, second in sorted(equivalent_emoji.items()):
        assert coverage[first] == coverage[second], (
            '%s and %s should map to the same glyph.' % (
                printable(first),
                printable(second)))

    # disable temporarily - some equivalent sequences we don't even know about
    """
    for glyph in set(coverage.values()):
        maps_to_glyph = [seq for seq in coverage if coverage[seq] == glyph]
        if len(maps_to_glyph) > 1:
            # There are more than one sequences mapping to the same glyph. We
            # need to make sure they were expected to be equivalent.
            equivalent_seqs = set()
            for seq in maps_to_glyph:
                equivalent_seq = seq
                while equivalent_seq in equivalent_emoji:
                    equivalent_seq = equivalent_emoji[equivalent_seq]
                equivalent_seqs.add(equivalent_seq)
            assert len(equivalent_seqs) == 1, (
                'The sequences %s should not result in the same glyph %s' % (
                    printable(equivalent_seqs),
                    glyph))
    """


def check_emoji_defaults(default_emoji):
    """Check that no non-emoji font shadows default-emoji characters, and
    that text-style versions of emoji are available above the emoji font."""
    missing_text_chars = _emoji_properties['Emoji'] - default_emoji
    emoji_font_seen = False
    for record in _fallback_chain:
        if 'Zsye' in record.scripts:
            emoji_font_seen = True
            # No need to check the emoji font
            continue
        # For later fonts, we only check them if they have a script
        # defined, since the defined script may get them to a higher
        # score even if they appear after the emoji font.
        if emoji_font_seen and not record.scripts:
            continue

        # Check default emoji-style characters
        assert_font_supports_none_of_chars(record.font, sorted(default_emoji))

        # Mark default text-style characters appearing in fonts above the emoji
        # font as seen
        if not emoji_font_seen:
            missing_text_chars -= set(get_best_cmap(record.font))

    # Noto does not have monochrome glyphs for Unicode 7.0 wingdings and
    # webdings yet.
    missing_text_chars -= _chars_by_age['7.0']
    # TODO: Remove these after b/26113320 is fixed
    missing_text_chars -= {
        0x263A, # WHITE SMILING FACE
        0x270C, # VICTORY HAND
        0x2744, # SNOWFLAKE
        0x2764, # HEAVY BLACK HEART
    }
    assert missing_text_chars == set(), (
        'Text style version of some emoji characters are missing: ' + repr(missing_text_chars))


# Setting reverse to true returns a dictionary that maps the values to sets of
# characters, useful for some binary properties. Otherwise, we get a
# dictionary that maps characters to the property values, assuming there's only
# one property in the file.
def parse_unicode_datafile(file_path, reverse=False):
    """Parse a semicolon-delimited Unicode data file.

    Each data line is 'chars ; property', where chars is a single
    codepoint, a codepoint range ('XXXX..YYYY'), or a space-separated
    sequence. Comments ('#' to end of line) and blank lines are skipped.
    """
    if reverse:
        output_dict = collections.defaultdict(set)
    else:
        output_dict = {}
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue

            chars, prop = line.split(';')
            chars = chars.strip()
            prop = prop.strip()

            if ' ' in chars:  # character sequence
                sequence = [int(ch, 16) for ch in chars.split(' ')]
                additions = [tuple(sequence)]
            elif '..' in chars:  # character range
                char_start, char_end = chars.split('..')
                char_start = int(char_start, 16)
                char_end = int(char_end, 16)
                # NOTE: was xrange, which is a NameError on Python 3;
                # range works on both Python 2 and 3.
                additions = range(char_start, char_end+1)
            else:  # single character
                additions = [int(chars, 16)]
            if reverse:
                output_dict[prop].update(additions)
            else:
                for addition in additions:
                    assert addition not in output_dict
                    output_dict[addition] = prop
    return output_dict


def parse_standardized_variants(file_path):
    """Parse StandardizedVariants.txt, returning (text_set, emoji_set) of
    (base, variation_selector) pairs for text-style and emoji-style
    standardized variation sequences."""
    emoji_set = set()
    text_set = set()
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue
            sequence, description, _ = line.split(';')
            sequence = sequence.strip().split(' ')
            base = int(sequence[0], 16)
            vs = int(sequence[1], 16)
            description = description.strip()
            if description == 'text style':
                text_set.add((base, vs))
            elif description == 'emoji style':
                emoji_set.add((base, vs))
    return text_set, emoji_set


def parse_ucd(ucd_path):
    """Load the Unicode data files needed for the emoji checks into
    module-level globals, applying Android-specific exclusions and
    additions."""
    global _emoji_properties, _chars_by_age
    global _text_variation_sequences, _emoji_variation_sequences
    global _emoji_sequences, _emoji_zwj_sequences
    _emoji_properties = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-data.txt'), reverse=True)
    _chars_by_age = parse_unicode_datafile(
        path.join(ucd_path, 'DerivedAge.txt'), reverse=True)
    sequences = parse_standardized_variants(
        path.join(ucd_path, 'StandardizedVariants.txt'))
    _text_variation_sequences, _emoji_variation_sequences = sequences
    _emoji_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-sequences.txt'))
    _emoji_zwj_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-zwj-sequences.txt'))

    # filter modern pentathlon, as it seems likely to be removed from final spec
    # also filter rifle
    def is_excluded(n):
        return n in [0x1f93b, 0x1f946]

    def contains_excluded(t):
        if type(t) == int:
            return is_excluded(t)
        return any(is_excluded(cp) for cp in t)

    _emoji_properties['Emoji'] = set(
        t for t in _emoji_properties['Emoji'] if not contains_excluded(t))
    _emoji_sequences = dict(
        (t, v) for (t, v) in _emoji_sequences.items() if not contains_excluded(t))

    # add in UN flag
    UN_seq = flag_sequence('UN')
    _emoji_sequences[UN_seq] = 'Emoji_Flag_Sequence'


def flag_sequence(territory_code):
    """Return the regional-indicator codepoint tuple for a two-letter
    territory code (e.g. 'US' -> (U+1F1FA, U+1F1F8))."""
    return tuple(0x1F1E6 + ord(ch) - ord('A') for ch in territory_code)


# Flags with no official art; they are expected to render as a flag tofu.
UNSUPPORTED_FLAGS = frozenset({
    flag_sequence('BL'), flag_sequence('BQ'), flag_sequence('DG'),
    flag_sequence('EA'), flag_sequence('EH'), flag_sequence('FK'),
    flag_sequence('GF'), flag_sequence('GP'), flag_sequence('GS'),
    flag_sequence('MF'), flag_sequence('MQ'), flag_sequence('NC'),
    flag_sequence('PM'), flag_sequence('RE'), flag_sequence('TF'),
    flag_sequence('WF'), flag_sequence('XK'), flag_sequence('YT'),
})

# Territories expected to share a glyph with another territory's flag.
EQUIVALENT_FLAGS = {
    flag_sequence('BV'): flag_sequence('NO'),
    flag_sequence('CP'): flag_sequence('FR'),
    flag_sequence('HM'): flag_sequence('AU'),
    flag_sequence('SJ'): flag_sequence('NO'),
    flag_sequence('UM'): flag_sequence('US'),
}

COMBINING_KEYCAP = 0x20E3

# Android PUA emoji from before standard Unicode emoji existed, mapped to
# their standard equivalents.
LEGACY_ANDROID_EMOJI = {
    0xFE4E5: flag_sequence('JP'),
    0xFE4E6: flag_sequence('US'),
    0xFE4E7: flag_sequence('FR'),
    0xFE4E8: flag_sequence('DE'),
    0xFE4E9: flag_sequence('IT'),
    0xFE4EA: flag_sequence('GB'),
    0xFE4EB: flag_sequence('ES'),
    0xFE4EC: flag_sequence('RU'),
    0xFE4ED: flag_sequence('CN'),
    0xFE4EE: flag_sequence('KR'),
    0xFE82C: (ord('#'), COMBINING_KEYCAP),
    0xFE82E: (ord('1'), COMBINING_KEYCAP),
    0xFE82F: (ord('2'), COMBINING_KEYCAP),
    0xFE830: (ord('3'), COMBINING_KEYCAP),
    0xFE831: (ord('4'), COMBINING_KEYCAP),
    0xFE832: (ord('5'), COMBINING_KEYCAP),
    0xFE833: (ord('6'), COMBINING_KEYCAP),
    0xFE834: (ord('7'), COMBINING_KEYCAP),
    0xFE835: (ord('8'), COMBINING_KEYCAP),
    0xFE836: (ord('9'), COMBINING_KEYCAP),
    0xFE837: (ord('0'), COMBINING_KEYCAP),
}

# ZWJ sequences that canonically denote the same thing as a single codepoint.
ZWJ_IDENTICALS = {
    # KISS
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F48B, 0x200D, 0x1F468): 0x1F48F,
    # COUPLE WITH HEART
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F468): 0x1F491,
    # FAMILY
    (0x1F468, 0x200D, 0x1F469, 0x200D, 0x1F466): 0x1F46A,
}


def is_fitzpatrick_modifier(cp):
    """Return True if cp is an emoji skin-tone (Fitzpatrick) modifier."""
    return 0x1f3fb <= cp <= 0x1f3ff


def compute_expected_emoji():
    """Compute the sets of emoji the emoji font is expected to support.

    Returns (all_emoji, default_emoji, equivalent_emoji): everything the
    font must cover, the characters that default to emoji presentation,
    and a map of sequences that must share glyphs.
    """
    equivalent_emoji = {}
    sequence_pieces = set()
    all_sequences = set()
    all_sequences.update(_emoji_variation_sequences)

    for sequence in _emoji_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)

    for sequence in _emoji_zwj_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)
        # Add reverse of all emoji ZWJ sequences, which are added to the fonts
        # as a workaround to get the sequences work in RTL text.
        reversed_seq = list(reversed(sequence))
        # if there are fitzpatrick modifiers in the sequence, keep them after
        # the emoji they modify
        # NOTE: was xrange, which is a NameError on Python 3;
        # range works on both Python 2 and 3.
        for i in range(1, len(reversed_seq)):
            if is_fitzpatrick_modifier(reversed_seq[i - 1]):
                tmp = reversed_seq[i]
                reversed_seq[i] = reversed_seq[i-1]
                reversed_seq[i-1] = tmp
        reversed_seq = tuple(reversed_seq)
        all_sequences.add(reversed_seq)
        equivalent_emoji[reversed_seq] = sequence

    # Add all two-letter flag sequences, as even the unsupported ones should
    # resolve to a flag tofu.
    all_letters = [chr(code) for code in range(ord('A'), ord('Z')+1)]
    all_two_letter_codes = itertools.product(all_letters, repeat=2)
    all_flags = {flag_sequence(code) for code in all_two_letter_codes}
    all_sequences.update(all_flags)
    tofu_flags = UNSUPPORTED_FLAGS | (all_flags - set(_emoji_sequences.keys()))

    all_emoji = (
        _emoji_properties['Emoji'] |
        all_sequences |
        sequence_pieces |
        set(LEGACY_ANDROID_EMOJI.keys()))
    default_emoji = (
        _emoji_properties['Emoji_Presentation'] |
        all_sequences |
        set(LEGACY_ANDROID_EMOJI.keys()))

    # All tofu flags should share one glyph.
    first_tofu_flag = sorted(tofu_flags)[0]
    for flag in tofu_flags:
        if flag != first_tofu_flag:
            equivalent_emoji[flag] = first_tofu_flag
    equivalent_emoji.update(EQUIVALENT_FLAGS)
    equivalent_emoji.update(LEGACY_ANDROID_EMOJI)
    equivalent_emoji.update(ZWJ_IDENTICALS)
    for seq in _emoji_variation_sequences:
        equivalent_emoji[seq] = seq[0]

    return all_emoji, default_emoji, equivalent_emoji


def main():
    """Entry point: argv[1] is the target output dir, argv[2] is 'true' to
    run emoji checks, and argv[3] (if so) is the UCD data directory."""
    global _fonts_dir
    target_out = sys.argv[1]
    _fonts_dir = path.join(target_out, 'fonts')

    fonts_xml_path = path.join(target_out, 'etc', 'fonts.xml')
    parse_fonts_xml(fonts_xml_path)

    hyphens_dir = path.join(target_out, 'usr', 'hyphen-data')
    check_hyphens(hyphens_dir)

    check_emoji = sys.argv[2]
    if check_emoji == 'true':
        ucd_path = sys.argv[3]
        parse_ucd(ucd_path)
        all_emoji, default_emoji, equivalent_emoji = compute_expected_emoji()
        check_emoji_coverage(all_emoji, equivalent_emoji)
        check_emoji_defaults(default_emoji)


if __name__ == '__main__':
    main()