/*
 * Copyright © 2019 Adobe Inc.
 * Copyright © 2019 Ebrahim Byagowi
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Adobe Author(s): Michiharu Ariza
 */

#ifndef HB_OT_VAR_GVAR_TABLE_HH
#define HB_OT_VAR_GVAR_TABLE_HH

#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"

/*
 * gvar -- Glyph Variation Table
 * https://docs.microsoft.com/en-us/typography/opentype/spec/gvar
 */
#define HB_OT_TAG_gvar HB_TAG('g','v','a','r')

namespace OT {

struct GlyphVariationData : TupleVariationData
{};

struct glyph_variations_t
{
  using tuple_variations_t = TupleVariationData::tuple_variations_t;
  hb_vector_t<tuple_variations_t> glyph_variations;

  hb_vector_t<char> compiled_shared_tuples;
  private:
  unsigned shared_tuples_count = 0;

  /* shared coords -> index map after instantiation */
  hb_hashmap_t<const hb_vector_t<char>*, unsigned> shared_tuples_idx_map;

  public:
  unsigned compiled_shared_tuples_count () const
  { return shared_tuples_count; }

  unsigned compiled_byte_size () const
  {
    unsigned byte_size = 0;
    for (const auto& _ : glyph_variations)
      byte_size += _.get_compiled_byte_size ();

    return byte_size;
  }

  bool create_from_glyphs_var_data (unsigned axis_count,
                                    const hb_array_t<const F2DOT14> shared_tuples,
                                    const hb_subset_plan_t *plan,
                                    const hb_hashmap_t<hb_codepoint_t, hb_bytes_t>& new_gid_var_data_map)
  {
    if (unlikely (!glyph_variations.alloc (plan->new_to_old_gid_list.length, true)))
      return false;

    auto it = hb_iter (plan->new_to_old_gid_list);
    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      contour_point_vector_t *all_contour_points;
      if (!new_gid_var_data_map.has (new_gid) ||
          !plan->new_gid_contour_points_map.has (new_gid, &all_contour_points))
        return false;
      hb_bytes_t var_data = new_gid_var_data_map.get (new_gid);

      const GlyphVariationData* p = reinterpret_cast<const GlyphVariationData*> (var_data.arrayZ);
      hb_vector_t<unsigned> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      tuple_variations_t tuple_vars;

      /* in case variation data is empty, push an empty struct into the vector,
       * keep the vector in sync with the new_to_old_gid_list */
      if (!var_data || !p->has_data () || !all_contour_points->length ||
          !GlyphVariationData::get_tuple_iterator (var_data, axis_count,
                                                   var_data.arrayZ,
                                                   shared_indices, &iterator))
      {
        glyph_variations.push (std::move (tuple_vars));
        continue;
      }

      if (!p->decompile_tuple_variations (all_contour_points->length, true /* is_gvar */,
                                          iterator, &(plan->axes_old_index_tag_map),
                                          shared_indices, shared_tuples,
                                          tuple_vars /* OUT */))
        return false;
      glyph_variations.push (std::move (tuple_vars));
    }
    return !glyph_variations.in_error () && glyph_variations.length == plan->new_to_old_gid_list.length;
  }
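
  /* The subsetting flow through this struct, roughly: create_from_glyphs_var_data()
   * above decompiles each glyph's serialized variation data into tuple_variations_t,
   * instantiate() below re-solves those tuples against the subset plan's axis
   * limits, and compile_bytes() (after pooling shared peak tuples) re-serializes
   * them for gvar::serialize() to copy out. */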

  bool instantiate (const hb_subset_plan_t *plan)
  {
    unsigned count = plan->new_to_old_gid_list.length;
    for (unsigned i = 0; i < count; i++)
    {
      hb_codepoint_t new_gid = plan->new_to_old_gid_list[i].first;
      contour_point_vector_t *all_points;
      if (!plan->new_gid_contour_points_map.has (new_gid, &all_points))
        return false;
      if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points))
        return false;
    }
    return true;
  }

  bool compile_bytes (const hb_map_t& axes_index_map,
                      const hb_map_t& axes_old_index_tag_map)
  {
    if (!compile_shared_tuples (axes_index_map, axes_old_index_tag_map))
      return false;
    for (tuple_variations_t& vars: glyph_variations)
      if (!vars.compile_bytes (axes_index_map, axes_old_index_tag_map,
                               true, /* use shared points */
                               &shared_tuples_idx_map))
        return false;

    return true;
  }

  bool compile_shared_tuples (const hb_map_t& axes_index_map,
                              const hb_map_t& axes_old_index_tag_map)
  {
    /* key is pointer to compiled_peak_coords inside each tuple, hashing
     * function will always deref pointers first */
    hb_hashmap_t<const hb_vector_t<char>*, unsigned> coords_count_map;

    /* count the num of shared coords */
    for (tuple_variations_t& vars: glyph_variations)
    {
      for (tuple_delta_t& var : vars.tuple_vars)
      {
        if (!var.compile_peak_coords (axes_index_map, axes_old_index_tag_map))
          return false;
        unsigned* count;
        if (coords_count_map.has (&(var.compiled_peak_coords), &count))
          coords_count_map.set (&(var.compiled_peak_coords), *count + 1);
        else
          coords_count_map.set (&(var.compiled_peak_coords), 1);
      }
    }

    if (!coords_count_map || coords_count_map.in_error ())
      return false;

    /* add only those coords that are used more than once into the vector and sort */
    hb_vector_t<const hb_vector_t<char>*> shared_coords;
    if (unlikely (!shared_coords.alloc (coords_count_map.get_population ())))
      return false;

    for (const auto _ : coords_count_map.iter ())
    {
      if (_.second == 1) continue;
      shared_coords.push (_.first);
    }

    /* no shared tuples: no coords are used more than once */
    if (!shared_coords) return true;
    /* sorting based on the coords frequency first (high to low), then compare
     * the coords bytes */
    hb_qsort (shared_coords.arrayZ, shared_coords.length, sizeof (hb_vector_t<char>*), _cmp_coords, (void *) (&coords_count_map));

    /* build shared_coords->idx map and shared tuples byte array */

    shared_tuples_count = hb_min (0xFFFu + 1, shared_coords.length);
    unsigned len = shared_tuples_count * (shared_coords[0]->length);
    if (unlikely (!compiled_shared_tuples.alloc (len)))
      return false;

    for (unsigned i = 0; i < shared_tuples_count; i++)
    {
      shared_tuples_idx_map.set (shared_coords[i], i);
      /* add a concat() in hb_vector_t? */
      for (char c : shared_coords[i]->iter ())
        compiled_shared_tuples.push (c);
    }

    return true;
  }
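
  /* For example, if the same compiled peak coords (say, wght=1.0) appear in the
   * tuples of three different glyphs while another peak appears only once, only
   * the repeated record is appended to compiled_shared_tuples (and indexed in
   * shared_tuples_idx_map); single-use peaks stay embedded in their own glyph's
   * variation data. At most 0xFFF + 1 records are shared, matching the 12-bit
   * tuple-index field. */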

  static int _cmp_coords (const void *pa, const void *pb, void *arg)
  {
    const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* coords_count_map =
        reinterpret_cast<const hb_hashmap_t<const hb_vector_t<char>*, unsigned>*> (arg);

    /* shared_coords is hb_vector_t<const hb_vector_t<char>*> so casting pa/pb
     * to be a pointer to a pointer */
    const hb_vector_t<char>** a = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pa));
    const hb_vector_t<char>** b = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pb));

    bool has_a = coords_count_map->has (*a);
    bool has_b = coords_count_map->has (*b);

    if (has_a && has_b)
    {
      unsigned a_num = coords_count_map->get (*a);
      unsigned b_num = coords_count_map->get (*b);

      if (a_num != b_num)
        return b_num - a_num;

      return (*b)->as_array ().cmp ((*a)->as_array ());
    }
    else if (has_a) return -1;
    else if (has_b) return 1;
    else return 0;
  }

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize_glyph_var_data (hb_serialize_context_t *c,
                                 Iterator it,
                                 bool long_offset,
                                 unsigned num_glyphs,
                                 char* glyph_var_data_offsets /* OUT: glyph var data offsets array */) const
  {
    TRACE_SERIALIZE (this);

    if (long_offset)
    {
      ((HBUINT32 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 2;
    }
    unsigned glyph_offset = 0;
    hb_codepoint_t last_gid = 0;
    unsigned idx = 0;

    TupleVariationData* cur_glyph = c->start_embed<TupleVariationData> ();
    if (!cur_glyph) return_trace (false);
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      if (long_offset)
        for (; last_gid < gid; last_gid++)
          ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
      else
        for (; last_gid < gid; last_gid++)
          ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;

      if (idx >= glyph_variations.length) return_trace (false);
      if (!cur_glyph->serialize (c, true, glyph_variations[idx])) return_trace (false);
      TupleVariationData* next_glyph = c->start_embed<TupleVariationData> ();
      glyph_offset += (char *) next_glyph - (char *) cur_glyph;

      if (long_offset)
        ((HBUINT32 *) glyph_var_data_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) glyph_var_data_offsets)[gid] = glyph_offset / 2;

      last_gid++;
      idx++;
      cur_glyph = next_glyph;
    }

    if (long_offset)
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
    else
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;
    return_trace (true);
  }
};

struct gvar
{
  static constexpr hb_tag_t tableTag = HB_OT_TAG_gvar;

  bool sanitize_shallow (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) && (version.major == 1) &&
                  sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
                  (is_long_offset () ?
                     c->check_array (get_long_offset_array (), c->get_num_glyphs () + 1) :
                     c->check_array (get_short_offset_array (), c->get_num_glyphs () + 1)));
  }

  /* GlyphVariationData not sanitized here; must be checked while accessing each glyph variation data */
  bool sanitize (hb_sanitize_context_t *c) const
  { return sanitize_shallow (c); }

  bool decompile_glyph_variations (hb_subset_context_t *c,
                                   glyph_variations_t& glyph_vars /* OUT */) const
  {
    hb_hashmap_t<hb_codepoint_t, hb_bytes_t> new_gid_var_data_map;
    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
    {
      new_gid_var_data_map.set (0, hb_bytes_t ());
      it++;
    }

    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      hb_codepoint_t old_gid = _.second;
      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob, glyphCountX, old_gid);
      new_gid_var_data_map.set (new_gid, var_data_bytes);
    }

    if (new_gid_var_data_map.in_error ()) return false;

    hb_array_t<const F2DOT14> shared_tuples = (this+sharedTuples).as_array ((unsigned) sharedTupleCount * (unsigned) axisCount);
    return glyph_vars.create_from_glyphs_var_data (axisCount, shared_tuples, c->plan, new_gid_var_data_map);
  }
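
  /* serialize() below writes the new table: the header fields, then a
   * (num_glyphs + 1)-entry offset array whose width depends on whether the total
   * compiled glyph-variation data fits in 16 bits, then the compiled shared
   * tuples, and finally the per-glyph data via serialize_glyph_var_data(). */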

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize (hb_serialize_context_t *c,
                  const glyph_variations_t& glyph_vars,
                  Iterator it,
                  unsigned axis_count,
                  unsigned num_glyphs) const
  {
    TRACE_SERIALIZE (this);
    gvar *out = c->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axis_count;
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    unsigned glyph_var_data_size = glyph_vars.compiled_byte_size ();
    bool long_offset = glyph_var_data_size & ~0xFFFFu;
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *glyph_var_data_offsets = c->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!glyph_var_data_offsets) return_trace (false);

    /* shared tuples */
    unsigned shared_tuple_count = glyph_vars.compiled_shared_tuples_count ();
    out->sharedTupleCount = shared_tuple_count;

    if (!shared_tuple_count)
      out->sharedTuples = 0;
    else
    {
      hb_array_t<const char> shared_tuples = glyph_vars.compiled_shared_tuples.as_array ().copy (c);
      if (!shared_tuples.arrayZ) return_trace (false);
      out->sharedTuples = shared_tuples.arrayZ - (char *) out;
    }

    char *glyph_var_data = c->start_embed<char> ();
    if (!glyph_var_data) return_trace (false);
    out->dataZ = glyph_var_data - (char *) out;

    return_trace (glyph_vars.serialize_glyph_var_data (c, it, long_offset, num_glyphs,
                                                       (char *) glyph_var_data_offsets));
  }

  bool instantiate (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    glyph_variations_t glyph_vars;
    if (!decompile_glyph_variations (c, glyph_vars))
      return_trace (false);

    if (!glyph_vars.instantiate (c->plan)) return_trace (false);
    if (!glyph_vars.compile_bytes (c->plan->axes_index_map, c->plan->axes_old_index_tag_map))
      return_trace (false);

    unsigned axis_count = c->plan->axes_index_map.get_population ();
    unsigned num_glyphs = c->plan->num_output_glyphs ();
    auto it = hb_iter (c->plan->new_to_old_gid_list);
    return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs));
  }
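
  /* Note on subset() below: if every axis is pinned the table is no longer
   * needed and subset() returns false; if the plan carries normalized axis
   * limits (partial instancing) the table is rebuilt through instantiate()
   * above; otherwise the per-glyph variation data is copied through unchanged
   * and only the offset array is rewritten for the new glyph ids. */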

  bool subset (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    if (c->plan->all_axes_pinned)
      return_trace (false);

    if (c->plan->normalized_coords)
      return_trace (instantiate (c));

    unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;

    gvar *out = c->serializer->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axisCount;
    out->sharedTupleCount = sharedTupleCount;

    unsigned int num_glyphs = c->plan->num_output_glyphs ();
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    unsigned int subset_data_size = 0;
    for (auto &_ : it)
    {
      hb_codepoint_t old_gid = _.second;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, glyph_count, old_gid).length;
    }

    bool long_offset = subset_data_size & ~0xFFFFu;
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!subset_offsets) return_trace (false);

    /* shared tuples */
    if (!sharedTupleCount || !sharedTuples)
      out->sharedTuples = 0;
    else
    {
      unsigned int shared_tuple_size = F2DOT14::static_size * axisCount * sharedTupleCount;
      F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size);
      if (!tuples) return_trace (false);
      out->sharedTuples = (char *) tuples - (char *) out;
      hb_memcpy (tuples, this+sharedTuples, shared_tuple_size);
    }

    char *subset_data = c->serializer->allocate_size<char> (subset_data_size, false);
    if (!subset_data) return_trace (false);
    out->dataZ = subset_data - (char *) out;

    if (long_offset)
    {
      ((HBUINT32 *) subset_offsets)[0] = 0;
      subset_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) subset_offsets)[0] = 0;
      subset_offsets += 2;
    }
    unsigned int glyph_offset = 0;

    hb_codepoint_t last = 0;
    it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      hb_codepoint_t old_gid = _.second;

      if (long_offset)
        for (; last < gid; last++)
          ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
      else
        for (; last < gid; last++)
          ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob,
                                                            glyph_count,
                                                            old_gid);

      hb_memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
      subset_data += var_data_bytes.length;
      glyph_offset += var_data_bytes.length;

      if (long_offset)
        ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;

      last++; // Skip over gid
    }

    if (long_offset)
      for (; last < num_glyphs; last++)
        ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
    else
      for (; last < num_glyphs; last++)
        ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

    return_trace (true);
  }

  protected:
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob,
                                             unsigned glyph_count,
                                             hb_codepoint_t glyph) const
  {
    unsigned start_offset = get_offset (glyph_count, glyph);
    unsigned end_offset = get_offset (glyph_count, glyph + 1);
    if (unlikely (end_offset < start_offset)) return hb_bytes_t ();
    unsigned length = end_offset - start_offset;
    hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
    return likely (var_data.length >= GlyphVariationData::min_size) ? var_data : hb_bytes_t ();
  }

  bool is_long_offset () const { return flags & 1; }

  unsigned get_offset (unsigned glyph_count, unsigned i) const
  {
    if (unlikely (i > glyph_count)) return 0;
    _hb_compiler_memory_r_barrier ();
    return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2;
  }

  const HBUINT32 *get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; }
  const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; }
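
  /* Offset-array encoding, per the spec: with long offsets each entry is the
   * byte offset itself; with short offsets the stored value is the byte offset
   * divided by 2 (so a glyph whose data starts 6 bytes into the
   * GlyphVariationData array stores 3, and get_offset() above multiplies it
   * back), which is why per-glyph data lengths must stay even in that case. */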

  public:
  struct accelerator_t
  {
    accelerator_t (hb_face_t *face)
    {
      table = hb_sanitize_context_t ().reference_table<gvar> (face);
      /* If sanitize failed, set glyphCount to 0. */
      glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;

      /* For shared tuples that have only one or two axes active, cache the
       * indices of those axes. This speeds up calculate_scalar() a lot
       * for fonts with lots of axes and many "monovar" tuples. */
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
      unsigned count = table->sharedTupleCount;
      if (unlikely (!shared_tuple_active_idx.resize (count, false))) return;
      unsigned axis_count = table->axisCount;
      for (unsigned i = 0; i < count; i++)
      {
        hb_array_t<const F2DOT14> tuple = shared_tuples.sub_array (axis_count * i, axis_count);
        int idx1 = -1, idx2 = -1;
        for (unsigned j = 0; j < axis_count; j++)
        {
          const F2DOT14 &peak = tuple.arrayZ[j];
          if (peak.to_int () != 0)
          {
            if (idx1 == -1)
              idx1 = j;
            else if (idx2 == -1)
              idx2 = j;
            else
            {
              idx1 = idx2 = -1;
              break;
            }
          }
        }
        shared_tuple_active_idx.arrayZ[i] = {idx1, idx2};
      }
    }
    ~accelerator_t () { table.destroy (); }

    private:

    static float infer_delta (const hb_array_t<contour_point_t> points,
                              const hb_array_t<contour_point_t> deltas,
                              unsigned int target, unsigned int prev, unsigned int next,
                              float contour_point_t::*m)
    {
      float target_val = points.arrayZ[target].*m;
      float prev_val = points.arrayZ[prev].*m;
      float next_val = points.arrayZ[next].*m;
      float prev_delta = deltas.arrayZ[prev].*m;
      float next_delta = deltas.arrayZ[next].*m;

      if (prev_val == next_val)
        return (prev_delta == next_delta) ? prev_delta : 0.f;
      else if (target_val <= hb_min (prev_val, next_val))
        return (prev_val < next_val) ? prev_delta : next_delta;
      else if (target_val >= hb_max (prev_val, next_val))
        return (prev_val > next_val) ? prev_delta : next_delta;

      /* linear interpolation */
      float r = (target_val - prev_val) / (next_val - prev_val);
      return prev_delta + r * (next_delta - prev_delta);
    }

    static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
    { return (i >= end) ? start : (i + 1); }
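
    /* Worked example for infer_delta(): with prev_val=100, next_val=200,
     * target_val=150, prev_delta=10 and next_delta=30, the ratio r is
     * (150-100)/(200-100) = 0.5, so the inferred delta is 10 + 0.5 * (30-10) = 20.
     * A target at or outside the [prev_val, next_val] range takes the delta of
     * the nearer reference point instead of extrapolating. */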

    public:
    bool apply_deltas_to_points (hb_codepoint_t glyph,
                                 hb_array_t<int> coords,
                                 const hb_array_t<contour_point_t> points,
                                 bool phantom_only = false) const
    {
      if (unlikely (glyph >= glyphCount)) return true;

      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
      if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
      hb_vector_t<unsigned int> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
                                                   var_data_bytes.arrayZ,
                                                   shared_indices, &iterator))
        return true; /* so isn't applied at all */

      /* Save original points for inferred delta calculation */
      contour_point_vector_t orig_points_vec; // Populated lazily
      auto orig_points = orig_points_vec.as_array ();

      /* flag is used to indicate referenced point */
      contour_point_vector_t deltas_vec; // Populated lazily
      auto deltas = deltas_vec.as_array ();

      hb_vector_t<unsigned> end_points; // Populated lazily

      unsigned num_coords = table->axisCount;
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * num_coords);

      hb_vector_t<unsigned int> private_indices;
      hb_vector_t<int> x_deltas;
      hb_vector_t<int> y_deltas;
      unsigned count = points.length;
      bool flush = false;
      do
      {
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples,
                                                                 &shared_tuple_active_idx);
        if (scalar == 0.f) continue;
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        if (!deltas)
        {
          if (unlikely (!deltas_vec.resize (count, false))) return false;
          deltas = deltas_vec.as_array ();
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }

        const HBUINT8 *end = p + length;

        bool has_private_points = iterator.current_tuple->has_private_points ();
        if (has_private_points &&
            !GlyphVariationData::unpack_points (p, private_indices, end))
          return false;
        const hb_array_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;

        bool apply_to_all = (indices.length == 0);
        unsigned int num_deltas = apply_to_all ? points.length : indices.length;
        if (unlikely (!x_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::unpack_deltas (p, x_deltas, end))) return false;
        if (unlikely (!y_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::unpack_deltas (p, y_deltas, end))) return false;

        if (!apply_to_all)
        {
          if (!orig_points && !phantom_only)
          {
            orig_points_vec.extend (points);
            if (unlikely (orig_points_vec.in_error ())) return false;
            orig_points = orig_points_vec.as_array ();
          }

          if (flush)
          {
            for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              points.arrayZ[i].translate (deltas.arrayZ[i]);
            flush = false;
          }
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }
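
        /* Apply the unpacked deltas. When optimizing for size one generic loop
         * handles every case; otherwise the loop is specialized on
         * (scalar == 1.0) and apply_to_all so the common paths avoid per-point
         * multiplications and index lookups. */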
        if (HB_OPTIMIZE_SIZE_VAL)
        {
          for (unsigned int i = 0; i < num_deltas; i++)
          {
            unsigned int pt_index;
            if (apply_to_all)
              pt_index = i;
            else
            {
              pt_index = indices[i];
              if (unlikely (pt_index >= deltas.length)) continue;
            }
            if (phantom_only && pt_index < count - 4) continue;
            auto &delta = deltas.arrayZ[pt_index];
            delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
            delta.x += x_deltas.arrayZ[i] * scalar;
            delta.y += y_deltas.arrayZ[i] * scalar;
          }
        }
        else
        {
          /* Ouch. Four cases... for optimization. */
          if (scalar != 1.0f)
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                unsigned int pt_index = i;
                auto &delta = deltas.arrayZ[pt_index];
                delta.x += x_deltas.arrayZ[i] * scalar;
                delta.y += y_deltas.arrayZ[i] * scalar;
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.x += x_deltas.arrayZ[i] * scalar;
                delta.y += y_deltas.arrayZ[i] * scalar;
              }
          }
          else
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                unsigned int pt_index = i;
                auto &delta = deltas.arrayZ[pt_index];
                delta.x += x_deltas.arrayZ[i];
                delta.y += y_deltas.arrayZ[i];
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.x += x_deltas.arrayZ[i];
                delta.y += y_deltas.arrayZ[i];
              }
          }
        }

        /* infer deltas for unreferenced points */
        if (!apply_to_all && !phantom_only)
        {
          if (!end_points)
          {
            for (unsigned i = 0; i < count; ++i)
              if (points.arrayZ[i].is_end_point)
                end_points.push (i);
            if (unlikely (end_points.in_error ())) return false;
          }

          unsigned start_point = 0;
          for (unsigned end_point : end_points)
          {
            /* Check the number of unreferenced points in a contour.
             * If no unref points or no ref points, nothing to do. */
            unsigned unref_count = 0;
            for (unsigned i = start_point; i < end_point + 1; i++)
              unref_count += deltas.arrayZ[i].flag;
            unref_count = (end_point - start_point + 1) - unref_count;

            unsigned j = start_point;
            if (unref_count == 0 || unref_count > end_point - start_point)
              goto no_more_gaps;

            for (;;)
            {
              /* Locate the next gap of unreferenced points between two referenced
               * points prev and next.
               * Note that a gap may wrap around at left (start_point) and/or at
               * right (end_point). */
              unsigned int prev, next, i;
              for (;;)
              {
                i = j;
                j = next_index (i, start_point, end_point);
                if (deltas.arrayZ[i].flag && !deltas.arrayZ[j].flag) break;
              }
              prev = j = i;
              for (;;)
              {
                i = j;
                j = next_index (i, start_point, end_point);
                if (!deltas.arrayZ[i].flag && deltas.arrayZ[j].flag) break;
              }
              next = j;
              /* Infer deltas for all unref points in the gap between prev and next */
              i = prev;
              for (;;)
              {
                i = next_index (i, start_point, end_point);
                if (i == next) break;
                deltas.arrayZ[i].x = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::x);
                deltas.arrayZ[i].y = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::y);
                if (--unref_count == 0) goto no_more_gaps;
              }
            }
            no_more_gaps:
            start_point = end_point + 1;
          }
        }

        flush = true;

      } while (iterator.move_to_next ());

      if (flush)
      {
        for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
          points.arrayZ[i].translate (deltas.arrayZ[i]);
      }

      return true;
    }
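
    /* Rough usage sketch (illustrative only; names below are hypothetical, the
     * real callers live in the glyf code paths that collect contour points):
     *
     *   contour_point_vector_t points;   // outline points plus 4 phantom points
     *   ...fill points for gid...
     *   gvar_accel.apply_deltas_to_points (gid,
     *                                      hb_array (coords, coord_count),
     *                                      points.as_array ());
     *
     * coords are normalized 2.14 axis values; pass phantom_only=true to move
     * only the four phantom (metrics) points. */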

    unsigned int get_axis_count () const { return table->axisCount; }

    private:
    hb_blob_ptr_t<gvar> table;
    unsigned glyphCount;
    hb_vector_t<hb_pair_t<int, int>> shared_tuple_active_idx;
  };

  protected:
  FixedVersion<> version;       /* Version number of the glyph variations table.
                                 * Set to 0x00010000u. */
  HBUINT16 axisCount;           /* The number of variation axes for this font. This must be
                                 * the same number as axisCount in the 'fvar' table. */
  HBUINT16 sharedTupleCount;    /* The number of shared tuple records. Shared tuple records
                                 * can be referenced within glyph variation data tables for
                                 * multiple glyphs, as opposed to other tuple records stored
                                 * directly within a glyph variation data table. */
  NNOffset32To<UnsizedArrayOf<F2DOT14>>
           sharedTuples;        /* Offset from the start of this table to the shared tuple records.
                                 * Array of tuple records shared across all glyph variation data tables. */
  HBUINT16 glyphCountX;         /* The number of glyphs in this font. This must match the number of
                                 * glyphs stored elsewhere in the font. */
  HBUINT16 flags;               /* Bit-field that gives the format of the offset array that follows.
                                 * If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
                                 * offsets are uint32. */
  Offset32To<GlyphVariationData>
           dataZ;               /* Offset from the start of this table to the array of
                                 * GlyphVariationData tables. */
  UnsizedArrayOf<HBUINT8>
           offsetZ;             /* Offsets from the start of the GlyphVariationData array
                                 * to each GlyphVariationData table. */
  public:
  DEFINE_SIZE_ARRAY (20, offsetZ);
};

struct gvar_accelerator_t : gvar::accelerator_t {
  gvar_accelerator_t (hb_face_t *face) : gvar::accelerator_t (face) {}
};

} /* namespace OT */

#endif /* HB_OT_VAR_GVAR_TABLE_HH */