/*
 * Copyright © 2021  Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"
#include "hb-priority-queue.hh"
#include "hb-subset-instancer-iup.hh"


namespace OT {


/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;

  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count).arrayZ;
      end_tuple = get_end_tuple (axis_count).arrayZ;
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple ((double) start, (double) peak, (double) end));
    }

    return true;
  }

  double calculate_scalar (hb_array_t<const int> coords, unsigned int coord_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_vector_t<hb_pair_t<int,int>>
                           *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;
    unsigned step = 1;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.0;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        if (unlikely (index >= shared_tuple_active_idx->length))
          return 0.0;
        auto _ = (*shared_tuple_active_idx).arrayZ[index];
        if (_.second != -1)
        {
          start_idx = _.first;
          end_idx = _.second + 1;
          step = _.second - _.first;
        }
        else if (_.first != -1)
        {
          start_idx = _.first;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    double scalar = 1.0;
    for (unsigned int i = start_idx; i < end_idx; i += step)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.0;
        if (v < peak)
        { if (peak != start) scalar *= (double) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (double) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.0;
      else
        scalar *= (double) v / peak;
    }
    return scalar;
  }

  bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned
axis_count) const 202 { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); } 203 204 HBUINT16 varDataSize; /* The size in bytes of the serialized 205 * data for this tuple variation table. */ 206 TuppleIndex tupleIndex; /* A packed field. The high 4 bits are flags (see below). 207 The low 12 bits are an index into a shared tuple 208 records array. */ 209 /* UnsizedArrayOf<F2DOT14> peakTuple - optional */ 210 /* Peak tuple record for this tuple variation table — optional, 211 * determined by flags in the tupleIndex value. 212 * 213 * Note that this must always be included in the 'cvar' table. */ 214 /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */ 215 /* Intermediate start tuple record for this tuple variation table — optional, 216 determined by flags in the tupleIndex value. */ 217 /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */ 218 /* Intermediate end tuple record for this tuple variation table — optional, 219 * determined by flags in the tupleIndex value. */ 220 public: 221 DEFINE_SIZE_MIN (4); 222 }; 223 224 struct tuple_delta_t 225 { 226 static constexpr bool realloc_move = true; // Watch out when adding new members! 227 228 public: 229 hb_hashmap_t<hb_tag_t, Triple> axis_tuples; 230 231 /* indices_length = point_count, indice[i] = 1 means point i is referenced */ 232 hb_vector_t<bool> indices; 233 234 hb_vector_t<double> deltas_x; 235 /* empty for cvar tuples */ 236 hb_vector_t<double> deltas_y; 237 238 /* compiled data: header and deltas 239 * compiled point data is saved in a hashmap within tuple_variations_t cause 240 * some point sets might be reused by different tuple variations */ 241 hb_vector_t<unsigned char> compiled_tuple_header; 242 hb_vector_t<unsigned char> compiled_deltas; 243 244 /* compiled peak coords, empty for non-gvar tuples */ 245 hb_vector_t<char> compiled_peak_coords; 246 247 tuple_delta_t () = default; 248 tuple_delta_t (const tuple_delta_t& o) = default; 249 swap(tuple_delta_t & a,tuple_delta_t & b)250 friend void swap (tuple_delta_t& a, tuple_delta_t& b) noexcept 251 { 252 hb_swap (a.axis_tuples, b.axis_tuples); 253 hb_swap (a.indices, b.indices); 254 hb_swap (a.deltas_x, b.deltas_x); 255 hb_swap (a.deltas_y, b.deltas_y); 256 hb_swap (a.compiled_tuple_header, b.compiled_tuple_header); 257 hb_swap (a.compiled_deltas, b.compiled_deltas); 258 hb_swap (a.compiled_peak_coords, b.compiled_peak_coords); 259 } 260 tuple_delta_tOT::tuple_delta_t261 tuple_delta_t (tuple_delta_t&& o) noexcept : tuple_delta_t () 262 { hb_swap (*this, o); } 263 operator =OT::tuple_delta_t264 tuple_delta_t& operator = (tuple_delta_t&& o) noexcept 265 { 266 hb_swap (*this, o); 267 return *this; 268 } 269 remove_axisOT::tuple_delta_t270 void remove_axis (hb_tag_t axis_tag) 271 { axis_tuples.del (axis_tag); } 272 set_tentOT::tuple_delta_t273 bool set_tent (hb_tag_t axis_tag, Triple tent) 274 { return axis_tuples.set (axis_tag, tent); } 275 operator +=OT::tuple_delta_t276 tuple_delta_t& operator += (const tuple_delta_t& o) 277 { 278 unsigned num = indices.length; 279 for (unsigned i = 0; i < num; i++) 280 { 281 if (indices.arrayZ[i]) 282 { 283 if (o.indices.arrayZ[i]) 284 { 285 deltas_x[i] += o.deltas_x[i]; 286 if (deltas_y && o.deltas_y) 287 deltas_y[i] += o.deltas_y[i]; 288 } 289 } 290 else 291 { 292 if (!o.indices.arrayZ[i]) continue; 293 indices.arrayZ[i] = true; 294 deltas_x[i] = o.deltas_x[i]; 295 if (deltas_y && o.deltas_y) 296 deltas_y[i] = o.deltas_y[i]; 297 } 298 } 299 return *this; 300 } 301 operator 
*=OT::tuple_delta_t302 tuple_delta_t& operator *= (double scalar) 303 { 304 if (scalar == 1.0) 305 return *this; 306 307 unsigned num = indices.length; 308 if (deltas_y) 309 for (unsigned i = 0; i < num; i++) 310 { 311 if (!indices.arrayZ[i]) continue; 312 deltas_x[i] *= scalar; 313 deltas_y[i] *= scalar; 314 } 315 else 316 for (unsigned i = 0; i < num; i++) 317 { 318 if (!indices.arrayZ[i]) continue; 319 deltas_x[i] *= scalar; 320 } 321 return *this; 322 } 323 change_tuple_var_axis_limitOT::tuple_delta_t324 hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit, 325 TripleDistances axis_triple_distances) const 326 { 327 hb_vector_t<tuple_delta_t> out; 328 Triple *tent; 329 if (!axis_tuples.has (axis_tag, &tent)) 330 { 331 out.push (*this); 332 return out; 333 } 334 335 if ((tent->minimum < 0.0 && tent->maximum > 0.0) || 336 !(tent->minimum <= tent->middle && tent->middle <= tent->maximum)) 337 return out; 338 339 if (tent->middle == 0.0) 340 { 341 out.push (*this); 342 return out; 343 } 344 345 rebase_tent_result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances); 346 for (auto &t : solutions) 347 { 348 tuple_delta_t new_var = *this; 349 if (t.second == Triple ()) 350 new_var.remove_axis (axis_tag); 351 else 352 new_var.set_tent (axis_tag, t.second); 353 354 new_var *= t.first; 355 out.push (std::move (new_var)); 356 } 357 358 return out; 359 } 360 compile_peak_coordsOT::tuple_delta_t361 bool compile_peak_coords (const hb_map_t& axes_index_map, 362 const hb_map_t& axes_old_index_tag_map) 363 { 364 unsigned axis_count = axes_index_map.get_population (); 365 if (unlikely (!compiled_peak_coords.alloc (axis_count * F2DOT14::static_size))) 366 return false; 367 368 unsigned orig_axis_count = axes_old_index_tag_map.get_population (); 369 for (unsigned i = 0; i < orig_axis_count; i++) 370 { 371 if (!axes_index_map.has (i)) 372 continue; 373 374 hb_tag_t axis_tag = axes_old_index_tag_map.get (i); 375 Triple *coords; 376 F2DOT14 peak_coord; 377 if (axis_tuples.has (axis_tag, &coords)) 378 peak_coord.set_float (coords->middle); 379 else 380 peak_coord.set_int (0); 381 382 /* push F2DOT14 value into char vector */ 383 int16_t val = peak_coord.to_int (); 384 compiled_peak_coords.push (static_cast<char> (val >> 8)); 385 compiled_peak_coords.push (static_cast<char> (val & 0xFF)); 386 } 387 388 return !compiled_peak_coords.in_error (); 389 } 390 391 /* deltas should be compiled already before we compile tuple 392 * variation header cause we need to fill in the size of the 393 * serialized data for this tuple variation */ compile_tuple_var_headerOT::tuple_delta_t394 bool compile_tuple_var_header (const hb_map_t& axes_index_map, 395 unsigned points_data_length, 396 const hb_map_t& axes_old_index_tag_map, 397 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map) 398 { 399 /* compiled_deltas could be empty after iup delta optimization, we can skip 400 * compiling this tuple and return true */ 401 if (!compiled_deltas) return true; 402 403 unsigned cur_axis_count = axes_index_map.get_population (); 404 /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */ 405 unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4; 406 if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false; 407 408 unsigned flag = 0; 409 /* skip the first 4 header bytes: variationDataSize+tupleIndex */ 410 F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4); 411 F2DOT14* end = 
reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ()); 412 hb_array_t<F2DOT14> coords (p, end - p); 413 414 /* encode peak coords */ 415 unsigned peak_count = 0; 416 unsigned *shared_tuple_idx; 417 if (shared_tuples_idx_map && 418 shared_tuples_idx_map->has (&compiled_peak_coords, &shared_tuple_idx)) 419 { 420 flag = *shared_tuple_idx; 421 } 422 else 423 { 424 peak_count = encode_peak_coords(coords, flag, axes_index_map, axes_old_index_tag_map); 425 if (!peak_count) return false; 426 } 427 428 /* encode interim coords, it's optional so returned num could be 0 */ 429 unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map); 430 431 /* pointdata length = 0 implies "use shared points" */ 432 if (points_data_length) 433 flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers; 434 435 unsigned serialized_data_size = points_data_length + compiled_deltas.length; 436 TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ()); 437 o->varDataSize = serialized_data_size; 438 o->tupleIndex = flag; 439 440 unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size); 441 return compiled_tuple_header.resize (total_header_len); 442 } 443 encode_peak_coordsOT::tuple_delta_t444 unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords, 445 unsigned& flag, 446 const hb_map_t& axes_index_map, 447 const hb_map_t& axes_old_index_tag_map) const 448 { 449 unsigned orig_axis_count = axes_old_index_tag_map.get_population (); 450 auto it = peak_coords.iter (); 451 unsigned count = 0; 452 for (unsigned i = 0; i < orig_axis_count; i++) 453 { 454 if (!axes_index_map.has (i)) /* axis pinned */ 455 continue; 456 hb_tag_t axis_tag = axes_old_index_tag_map.get (i); 457 Triple *coords; 458 if (!axis_tuples.has (axis_tag, &coords)) 459 (*it).set_int (0); 460 else 461 (*it).set_float (coords->middle); 462 it++; 463 count++; 464 } 465 flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple; 466 return count; 467 } 468 469 /* if no need to encode intermediate coords, then just return p */ encode_interm_coordsOT::tuple_delta_t470 unsigned encode_interm_coords (hb_array_t<F2DOT14> coords, 471 unsigned& flag, 472 const hb_map_t& axes_index_map, 473 const hb_map_t& axes_old_index_tag_map) const 474 { 475 unsigned orig_axis_count = axes_old_index_tag_map.get_population (); 476 unsigned cur_axis_count = axes_index_map.get_population (); 477 478 auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter (); 479 auto end_coords_iter = coords.sub_array (cur_axis_count).iter (); 480 bool encode_needed = false; 481 unsigned count = 0; 482 for (unsigned i = 0; i < orig_axis_count; i++) 483 { 484 if (!axes_index_map.has (i)) /* axis pinned */ 485 continue; 486 hb_tag_t axis_tag = axes_old_index_tag_map.get (i); 487 Triple *coords; 488 float min_val = 0.f, val = 0.f, max_val = 0.f; 489 if (axis_tuples.has (axis_tag, &coords)) 490 { 491 min_val = coords->minimum; 492 val = coords->middle; 493 max_val = coords->maximum; 494 } 495 496 (*start_coords_iter).set_float (min_val); 497 (*end_coords_iter).set_float (max_val); 498 499 start_coords_iter++; 500 end_coords_iter++; 501 count += 2; 502 if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f)) 503 encode_needed = true; 504 } 505 506 if (encode_needed) 507 { 508 flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion; 509 return count; 510 } 511 return 0; 512 } 513 compile_deltasOT::tuple_delta_t514 bool compile_deltas 
() 515 { return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); } 516 compile_deltasOT::tuple_delta_t517 static bool compile_deltas (const hb_vector_t<bool> &point_indices, 518 const hb_vector_t<double> &x_deltas, 519 const hb_vector_t<double> &y_deltas, 520 hb_vector_t<unsigned char> &compiled_deltas /* OUT */) 521 { 522 hb_vector_t<int> rounded_deltas; 523 if (unlikely (!rounded_deltas.alloc (point_indices.length))) 524 return false; 525 526 for (unsigned i = 0; i < point_indices.length; i++) 527 { 528 if (!point_indices[i]) continue; 529 int rounded_delta = (int) roundf (x_deltas.arrayZ[i]); 530 rounded_deltas.push (rounded_delta); 531 } 532 533 if (!rounded_deltas) return true; 534 /* allocate enough memories 5 * num_deltas */ 535 unsigned alloc_len = 5 * rounded_deltas.length; 536 if (y_deltas) 537 alloc_len *= 2; 538 539 if (unlikely (!compiled_deltas.resize (alloc_len))) return false; 540 541 unsigned encoded_len = compile_deltas (compiled_deltas, rounded_deltas); 542 543 if (y_deltas) 544 { 545 /* reuse the rounded_deltas vector, check that y_deltas have the same num of deltas as x_deltas */ 546 unsigned j = 0; 547 for (unsigned idx = 0; idx < point_indices.length; idx++) 548 { 549 if (!point_indices[idx]) continue; 550 int rounded_delta = (int) roundf (y_deltas.arrayZ[idx]); 551 552 if (j >= rounded_deltas.length) return false; 553 554 rounded_deltas[j++] = rounded_delta; 555 } 556 557 if (j != rounded_deltas.length) return false; 558 encoded_len += compile_deltas (compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas); 559 } 560 return compiled_deltas.resize (encoded_len); 561 } 562 compile_deltasOT::tuple_delta_t563 static unsigned compile_deltas (hb_array_t<unsigned char> encoded_bytes, 564 hb_array_t<const int> deltas) 565 { 566 return TupleValues::compile (deltas, encoded_bytes); 567 } 568 calc_inferred_deltasOT::tuple_delta_t569 bool calc_inferred_deltas (const contour_point_vector_t& orig_points) 570 { 571 unsigned point_count = orig_points.length; 572 if (point_count != indices.length) 573 return false; 574 575 unsigned ref_count = 0; 576 hb_vector_t<unsigned> end_points; 577 578 for (unsigned i = 0; i < point_count; i++) 579 { 580 if (indices.arrayZ[i]) 581 ref_count++; 582 if (orig_points.arrayZ[i].is_end_point) 583 end_points.push (i); 584 } 585 /* all points are referenced, nothing to do */ 586 if (ref_count == point_count) 587 return true; 588 if (unlikely (end_points.in_error ())) return false; 589 590 hb_set_t inferred_idxes; 591 unsigned start_point = 0; 592 for (unsigned end_point : end_points) 593 { 594 /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */ 595 unsigned unref_count = 0; 596 for (unsigned i = start_point; i < end_point + 1; i++) 597 unref_count += indices.arrayZ[i]; 598 unref_count = (end_point - start_point + 1) - unref_count; 599 600 unsigned j = start_point; 601 if (unref_count == 0 || unref_count > end_point - start_point) 602 goto no_more_gaps; 603 for (;;) 604 { 605 /* Locate the next gap of unreferenced points between two referenced points prev and next. 606 * Note that a gap may wrap around at left (start_point) and/or at right (end_point). 
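       *
       * Illustrative note: for an unreferenced point p lying between referenced
       * points prev and next, infer_delta () below interpolates linearly on each
       * axis; e.g. with prev at x=100 (delta 10), next at x=200 (delta 30) and p
       * at x=150, the inferred x delta is 10 + 0.5 * (30 - 10) = 20.  When p's
       * coordinate falls outside the [prev, next] range, the delta of the nearer
       * reference point is used as-is.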
607 */ 608 unsigned int prev, next, i; 609 for (;;) 610 { 611 i = j; 612 j = next_index (i, start_point, end_point); 613 if (indices.arrayZ[i] && !indices.arrayZ[j]) break; 614 } 615 prev = j = i; 616 for (;;) 617 { 618 i = j; 619 j = next_index (i, start_point, end_point); 620 if (!indices.arrayZ[i] && indices.arrayZ[j]) break; 621 } 622 next = j; 623 /* Infer deltas for all unref points in the gap between prev and next */ 624 i = prev; 625 for (;;) 626 { 627 i = next_index (i, start_point, end_point); 628 if (i == next) break; 629 deltas_x.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].x, 630 (double) orig_points.arrayZ[prev].x, 631 (double) orig_points.arrayZ[next].x, 632 deltas_x.arrayZ[prev], deltas_x.arrayZ[next]); 633 deltas_y.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].y, 634 (double) orig_points.arrayZ[prev].y, 635 (double) orig_points.arrayZ[next].y, 636 deltas_y.arrayZ[prev], deltas_y.arrayZ[next]); 637 inferred_idxes.add (i); 638 if (--unref_count == 0) goto no_more_gaps; 639 } 640 } 641 no_more_gaps: 642 start_point = end_point + 1; 643 } 644 645 for (unsigned i = 0; i < point_count; i++) 646 { 647 /* if points are not referenced and deltas are not inferred, set to 0. 648 * reference all points for gvar */ 649 if ( !indices[i]) 650 { 651 if (!inferred_idxes.has (i)) 652 { 653 deltas_x.arrayZ[i] = 0.0; 654 deltas_y.arrayZ[i] = 0.0; 655 } 656 indices[i] = true; 657 } 658 } 659 return true; 660 } 661 optimizeOT::tuple_delta_t662 bool optimize (const contour_point_vector_t& contour_points, 663 bool is_composite, 664 double tolerance = 0.5 + 1e-10) 665 { 666 unsigned count = contour_points.length; 667 if (deltas_x.length != count || 668 deltas_y.length != count) 669 return false; 670 671 hb_vector_t<bool> opt_indices; 672 hb_vector_t<int> rounded_x_deltas, rounded_y_deltas; 673 674 if (unlikely (!rounded_x_deltas.alloc (count) || 675 !rounded_y_deltas.alloc (count))) 676 return false; 677 678 for (unsigned i = 0; i < count; i++) 679 { 680 int rounded_x_delta = (int) roundf (deltas_x.arrayZ[i]); 681 int rounded_y_delta = (int) roundf (deltas_y.arrayZ[i]); 682 rounded_x_deltas.push (rounded_x_delta); 683 rounded_y_deltas.push (rounded_y_delta); 684 } 685 686 if (!iup_delta_optimize (contour_points, rounded_x_deltas, rounded_y_deltas, opt_indices, tolerance)) 687 return false; 688 689 unsigned ref_count = 0; 690 for (bool ref_flag : opt_indices) 691 ref_count += ref_flag; 692 693 if (ref_count == count) return true; 694 695 hb_vector_t<double> opt_deltas_x, opt_deltas_y; 696 bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0); 697 if (is_comp_glyph_wo_deltas) 698 { 699 if (unlikely (!opt_deltas_x.resize (count) || 700 !opt_deltas_y.resize (count))) 701 return false; 702 703 opt_indices.arrayZ[0] = true; 704 for (unsigned i = 1; i < count; i++) 705 opt_indices.arrayZ[i] = false; 706 } 707 708 hb_vector_t<unsigned char> opt_point_data; 709 if (!compile_point_set (opt_indices, opt_point_data)) 710 return false; 711 hb_vector_t<unsigned char> opt_deltas_data; 712 if (!compile_deltas (opt_indices, 713 is_comp_glyph_wo_deltas ? opt_deltas_x : deltas_x, 714 is_comp_glyph_wo_deltas ? 
opt_deltas_y : deltas_y, 715 opt_deltas_data)) 716 return false; 717 718 hb_vector_t<unsigned char> point_data; 719 if (!compile_point_set (indices, point_data)) 720 return false; 721 hb_vector_t<unsigned char> deltas_data; 722 if (!compile_deltas (indices, deltas_x, deltas_y, deltas_data)) 723 return false; 724 725 if (opt_point_data.length + opt_deltas_data.length < point_data.length + deltas_data.length) 726 { 727 indices.fini (); 728 indices = std::move (opt_indices); 729 730 if (is_comp_glyph_wo_deltas) 731 { 732 deltas_x.fini (); 733 deltas_x = std::move (opt_deltas_x); 734 735 deltas_y.fini (); 736 deltas_y = std::move (opt_deltas_y); 737 } 738 } 739 return !indices.in_error () && !deltas_x.in_error () && !deltas_y.in_error (); 740 } 741 compile_point_setOT::tuple_delta_t742 static bool compile_point_set (const hb_vector_t<bool> &point_indices, 743 hb_vector_t<unsigned char>& compiled_points /* OUT */) 744 { 745 unsigned num_points = 0; 746 for (bool i : point_indices) 747 if (i) num_points++; 748 749 /* when iup optimization is enabled, num of referenced points could be 0 */ 750 if (!num_points) return true; 751 752 unsigned indices_length = point_indices.length; 753 /* If the points set consists of all points in the glyph, it's encoded with a 754 * single zero byte */ 755 if (num_points == indices_length) 756 return compiled_points.resize (1); 757 758 /* allocate enough memories: 2 bytes for count + 3 bytes for each point */ 759 unsigned num_bytes = 2 + 3 *num_points; 760 if (unlikely (!compiled_points.resize (num_bytes, false))) 761 return false; 762 763 unsigned pos = 0; 764 /* binary data starts with the total number of reference points */ 765 if (num_points < 0x80) 766 compiled_points.arrayZ[pos++] = num_points; 767 else 768 { 769 compiled_points.arrayZ[pos++] = ((num_points >> 8) | 0x80); 770 compiled_points.arrayZ[pos++] = num_points & 0xFF; 771 } 772 773 const unsigned max_run_length = 0x7F; 774 unsigned i = 0; 775 unsigned last_value = 0; 776 unsigned num_encoded = 0; 777 while (i < indices_length && num_encoded < num_points) 778 { 779 unsigned run_length = 0; 780 unsigned header_pos = pos; 781 compiled_points.arrayZ[pos++] = 0; 782 783 bool use_byte_encoding = false; 784 bool new_run = true; 785 while (i < indices_length && num_encoded < num_points && 786 run_length <= max_run_length) 787 { 788 // find out next referenced point index 789 while (i < indices_length && !point_indices[i]) 790 i++; 791 792 if (i >= indices_length) break; 793 794 unsigned cur_value = i; 795 unsigned delta = cur_value - last_value; 796 797 if (new_run) 798 { 799 use_byte_encoding = (delta <= 0xFF); 800 new_run = false; 801 } 802 803 if (use_byte_encoding && delta > 0xFF) 804 break; 805 806 if (use_byte_encoding) 807 compiled_points.arrayZ[pos++] = delta; 808 else 809 { 810 compiled_points.arrayZ[pos++] = delta >> 8; 811 compiled_points.arrayZ[pos++] = delta & 0xFF; 812 } 813 i++; 814 last_value = cur_value; 815 run_length++; 816 num_encoded++; 817 } 818 819 if (use_byte_encoding) 820 compiled_points.arrayZ[header_pos] = run_length - 1; 821 else 822 compiled_points.arrayZ[header_pos] = (run_length - 1) | 0x80; 823 } 824 return compiled_points.resize (pos, false); 825 } 826 infer_deltaOT::tuple_delta_t827 static double infer_delta (double target_val, double prev_val, double next_val, double prev_delta, double next_delta) 828 { 829 if (prev_val == next_val) 830 return (prev_delta == next_delta) ? 
prev_delta : 0.0; 831 else if (target_val <= hb_min (prev_val, next_val)) 832 return (prev_val < next_val) ? prev_delta : next_delta; 833 else if (target_val >= hb_max (prev_val, next_val)) 834 return (prev_val > next_val) ? prev_delta : next_delta; 835 836 double r = (target_val - prev_val) / (next_val - prev_val); 837 return prev_delta + r * (next_delta - prev_delta); 838 } 839 next_indexOT::tuple_delta_t840 static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end) 841 { return (i >= end) ? start : (i + 1); } 842 }; 843 844 struct TupleVariationData 845 { sanitizeOT::TupleVariationData846 bool sanitize (hb_sanitize_context_t *c) const 847 { 848 TRACE_SANITIZE (this); 849 // here check on min_size only, TupleVariationHeader and var data will be 850 // checked while accessing through iterator. 851 return_trace (c->check_struct (this)); 852 } 853 get_sizeOT::TupleVariationData854 unsigned get_size (unsigned axis_count) const 855 { 856 unsigned total_size = min_size; 857 unsigned count = tupleVarCount.get_count (); 858 const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header()); 859 for (unsigned i = 0; i < count; i++) 860 { 861 total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size (); 862 tuple_var_header = &tuple_var_header->get_next (axis_count); 863 } 864 865 return total_size; 866 } 867 get_tuple_var_headerOT::TupleVariationData868 const TupleVariationHeader &get_tuple_var_header (void) const 869 { return StructAfter<TupleVariationHeader> (data); } 870 871 struct tuple_iterator_t; 872 struct tuple_variations_t 873 { 874 hb_vector_t<tuple_delta_t> tuple_vars; 875 876 private: 877 /* referenced point set->compiled point data map */ 878 hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<char>> point_data_map; 879 /* referenced point set-> count map, used in finding shared points */ 880 hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map; 881 882 /* empty for non-gvar tuples. 883 * shared_points_bytes is a pointer to some value in the point_data_map, 884 * which will be freed during map destruction. 
Save it for serialization, so 885 * no need to do find_shared_points () again */ 886 hb_vector_t<char> *shared_points_bytes = nullptr; 887 888 /* total compiled byte size as TupleVariationData format, initialized to 0 */ 889 unsigned compiled_byte_size = 0; 890 bool needs_padding = false; 891 892 /* for gvar iup delta optimization: whether this is a composite glyph */ 893 bool is_composite = false; 894 895 public: 896 tuple_variations_t () = default; 897 tuple_variations_t (const tuple_variations_t&) = delete; 898 tuple_variations_t& operator=(const tuple_variations_t&) = delete; 899 tuple_variations_t (tuple_variations_t&&) = default; 900 tuple_variations_t& operator=(tuple_variations_t&&) = default; 901 ~tuple_variations_t () = default; 902 operator boolOT::TupleVariationData::tuple_variations_t903 explicit operator bool () const { return bool (tuple_vars); } get_var_countOT::TupleVariationData::tuple_variations_t904 unsigned get_var_count () const 905 { 906 unsigned count = 0; 907 /* when iup delta opt is enabled, compiled_deltas could be empty and we 908 * should skip this tuple */ 909 for (auto& tuple: tuple_vars) 910 if (tuple.compiled_deltas) count++; 911 912 if (shared_points_bytes && shared_points_bytes->length) 913 count |= TupleVarCount::SharedPointNumbers; 914 return count; 915 } 916 get_compiled_byte_sizeOT::TupleVariationData::tuple_variations_t917 unsigned get_compiled_byte_size () const 918 { return compiled_byte_size; } 919 create_from_tuple_var_dataOT::TupleVariationData::tuple_variations_t920 bool create_from_tuple_var_data (tuple_iterator_t iterator, 921 unsigned tuple_var_count, 922 unsigned point_count, 923 bool is_gvar, 924 const hb_map_t *axes_old_index_tag_map, 925 const hb_vector_t<unsigned> &shared_indices, 926 const hb_array_t<const F2DOT14> shared_tuples, 927 bool is_composite_glyph) 928 { 929 do 930 { 931 const HBUINT8 *p = iterator.get_serialized_data (); 932 unsigned int length = iterator.current_tuple->get_data_size (); 933 if (unlikely (!iterator.var_data_bytes.check_range (p, length))) 934 return false; 935 936 hb_hashmap_t<hb_tag_t, Triple> axis_tuples; 937 if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples) 938 || axis_tuples.is_empty ()) 939 return false; 940 941 hb_vector_t<unsigned> private_indices; 942 bool has_private_points = iterator.current_tuple->has_private_points (); 943 const HBUINT8 *end = p + length; 944 if (has_private_points && 945 !TupleVariationData::decompile_points (p, private_indices, end)) 946 return false; 947 948 const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices; 949 bool apply_to_all = (indices.length == 0); 950 unsigned num_deltas = apply_to_all ? 
point_count : indices.length; 951 952 hb_vector_t<int> deltas_x; 953 954 if (unlikely (!deltas_x.resize (num_deltas, false) || 955 !TupleVariationData::decompile_deltas (p, deltas_x, end))) 956 return false; 957 958 hb_vector_t<int> deltas_y; 959 if (is_gvar) 960 { 961 if (unlikely (!deltas_y.resize (num_deltas, false) || 962 !TupleVariationData::decompile_deltas (p, deltas_y, end))) 963 return false; 964 } 965 966 tuple_delta_t var; 967 var.axis_tuples = std::move (axis_tuples); 968 if (unlikely (!var.indices.resize (point_count) || 969 !var.deltas_x.resize (point_count, false))) 970 return false; 971 972 if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false))) 973 return false; 974 975 for (unsigned i = 0; i < num_deltas; i++) 976 { 977 unsigned idx = apply_to_all ? i : indices[i]; 978 if (idx >= point_count) continue; 979 var.indices[idx] = true; 980 var.deltas_x[idx] = deltas_x[i]; 981 if (is_gvar) 982 var.deltas_y[idx] = deltas_y[i]; 983 } 984 tuple_vars.push (std::move (var)); 985 } while (iterator.move_to_next ()); 986 987 is_composite = is_composite_glyph; 988 return true; 989 } 990 create_from_item_var_dataOT::TupleVariationData::tuple_variations_t991 bool create_from_item_var_data (const VarData &var_data, 992 const hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>>& regions, 993 const hb_map_t& axes_old_index_tag_map, 994 unsigned& item_count, 995 const hb_inc_bimap_t* inner_map = nullptr) 996 { 997 /* NULL offset, to keep original varidx valid, just return */ 998 if (&var_data == &Null (VarData)) 999 return true; 1000 1001 unsigned num_regions = var_data.get_region_index_count (); 1002 if (!tuple_vars.alloc (num_regions)) return false; 1003 1004 item_count = inner_map ? inner_map->get_population () : var_data.get_item_count (); 1005 if (!item_count) return true; 1006 unsigned row_size = var_data.get_row_size (); 1007 const HBUINT8 *delta_bytes = var_data.get_delta_bytes (); 1008 1009 for (unsigned r = 0; r < num_regions; r++) 1010 { 1011 /* In VarData, deltas are organized in rows, convert them into 1012 * column(region) based tuples, resize deltas_x first */ 1013 tuple_delta_t tuple; 1014 if (!tuple.deltas_x.resize (item_count, false) || 1015 !tuple.indices.resize (item_count, false)) 1016 return false; 1017 1018 for (unsigned i = 0; i < item_count; i++) 1019 { 1020 tuple.indices.arrayZ[i] = true; 1021 tuple.deltas_x.arrayZ[i] = var_data.get_item_delta_fast (inner_map ? 
inner_map->backward (i) : i, 1022 r, delta_bytes, row_size); 1023 } 1024 1025 unsigned region_index = var_data.get_region_index (r); 1026 if (region_index >= regions.length) return false; 1027 tuple.axis_tuples = regions.arrayZ[region_index]; 1028 1029 tuple_vars.push (std::move (tuple)); 1030 } 1031 return !tuple_vars.in_error (); 1032 } 1033 1034 private: _cmp_axis_tagOT::TupleVariationData::tuple_variations_t1035 static int _cmp_axis_tag (const void *pa, const void *pb) 1036 { 1037 const hb_tag_t *a = (const hb_tag_t*) pa; 1038 const hb_tag_t *b = (const hb_tag_t*) pb; 1039 return (int)(*a) - (int)(*b); 1040 } 1041 change_tuple_variations_axis_limitsOT::TupleVariationData::tuple_variations_t1042 bool change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location, 1043 const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances) 1044 { 1045 /* sort axis_tag/axis_limits, make result deterministic */ 1046 hb_vector_t<hb_tag_t> axis_tags; 1047 if (!axis_tags.alloc (normalized_axes_location.get_population ())) 1048 return false; 1049 for (auto t : normalized_axes_location.keys ()) 1050 axis_tags.push (t); 1051 1052 axis_tags.qsort (_cmp_axis_tag); 1053 for (auto axis_tag : axis_tags) 1054 { 1055 Triple *axis_limit; 1056 if (!normalized_axes_location.has (axis_tag, &axis_limit)) 1057 return false; 1058 TripleDistances axis_triple_distances{1.0, 1.0}; 1059 if (axes_triple_distances.has (axis_tag)) 1060 axis_triple_distances = axes_triple_distances.get (axis_tag); 1061 1062 hb_vector_t<tuple_delta_t> new_vars; 1063 for (const tuple_delta_t& var : tuple_vars) 1064 { 1065 hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, *axis_limit, axis_triple_distances); 1066 if (!out) continue; 1067 1068 unsigned new_len = new_vars.length + out.length; 1069 1070 if (unlikely (!new_vars.alloc (new_len, false))) 1071 return false; 1072 1073 for (unsigned i = 0; i < out.length; i++) 1074 new_vars.push (std::move (out[i])); 1075 } 1076 tuple_vars.fini (); 1077 tuple_vars = std::move (new_vars); 1078 } 1079 return true; 1080 } 1081 1082 /* merge tuple variations with overlapping tents, if iup delta optimization 1083 * is enabled, add default deltas to contour_points */ merge_tuple_variationsOT::TupleVariationData::tuple_variations_t1084 bool merge_tuple_variations (contour_point_vector_t* contour_points = nullptr) 1085 { 1086 hb_vector_t<tuple_delta_t> new_vars; 1087 hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m; 1088 unsigned i = 0; 1089 for (const tuple_delta_t& var : tuple_vars) 1090 { 1091 /* if all axes are pinned, drop the tuple variation */ 1092 if (var.axis_tuples.is_empty ()) 1093 { 1094 /* if iup_delta_optimize is enabled, add deltas to contour coords */ 1095 if (contour_points && !contour_points->add_deltas (var.deltas_x, 1096 var.deltas_y, 1097 var.indices)) 1098 return false; 1099 continue; 1100 } 1101 1102 unsigned *idx; 1103 if (m.has (&(var.axis_tuples), &idx)) 1104 { 1105 new_vars[*idx] += var; 1106 } 1107 else 1108 { 1109 new_vars.push (var); 1110 if (!m.set (&(var.axis_tuples), i)) 1111 return false; 1112 i++; 1113 } 1114 } 1115 tuple_vars.fini (); 1116 tuple_vars = std::move (new_vars); 1117 return true; 1118 } 1119 1120 /* compile all point set and store byte data in a point_set->hb_bytes_t hashmap, 1121 * also update point_set->count map, which will be used in finding shared 1122 * point set*/ compile_all_point_setsOT::TupleVariationData::tuple_variations_t1123 bool compile_all_point_sets () 1124 { 1125 
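      /* Packed point-number format produced by tuple_delta_t::compile_point_set ():
       * a point count (one byte if < 0x80, otherwise two bytes with the high bit
       * of the first byte set), followed by runs of point-index deltas; each run's
       * control byte stores (run length - 1) in its low 7 bits and uses its high
       * bit to select 16-bit rather than 8-bit deltas.  For instance, the point
       * set {2, 4, 5} packs to the bytes 03 02 02 02 01, and "all points in the
       * glyph" is encoded as the single byte 00. */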
for (const auto& tuple: tuple_vars) 1126 { 1127 const hb_vector_t<bool>* points_set = &(tuple.indices); 1128 if (point_data_map.has (points_set)) 1129 { 1130 unsigned *count; 1131 if (unlikely (!point_set_count_map.has (points_set, &count) || 1132 !point_set_count_map.set (points_set, (*count) + 1))) 1133 return false; 1134 continue; 1135 } 1136 1137 hb_vector_t<unsigned char> compiled_point_data; 1138 if (!tuple_delta_t::compile_point_set (*points_set, compiled_point_data)) 1139 return false; 1140 1141 if (!point_data_map.set (points_set, std::move (compiled_point_data)) || 1142 !point_set_count_map.set (points_set, 1)) 1143 return false; 1144 } 1145 return true; 1146 } 1147 1148 /* find shared points set which saves most bytes */ find_shared_pointsOT::TupleVariationData::tuple_variations_t1149 void find_shared_points () 1150 { 1151 unsigned max_saved_bytes = 0; 1152 1153 for (const auto& _ : point_data_map.iter_ref ()) 1154 { 1155 const hb_vector_t<bool>* points_set = _.first; 1156 unsigned data_length = _.second.length; 1157 if (!data_length) continue; 1158 unsigned *count; 1159 if (unlikely (!point_set_count_map.has (points_set, &count) || 1160 *count <= 1)) 1161 { 1162 shared_points_bytes = nullptr; 1163 return; 1164 } 1165 1166 unsigned saved_bytes = data_length * ((*count) -1); 1167 if (saved_bytes > max_saved_bytes) 1168 { 1169 max_saved_bytes = saved_bytes; 1170 shared_points_bytes = &(_.second); 1171 } 1172 } 1173 } 1174 calc_inferred_deltasOT::TupleVariationData::tuple_variations_t1175 bool calc_inferred_deltas (const contour_point_vector_t& contour_points) 1176 { 1177 for (tuple_delta_t& var : tuple_vars) 1178 if (!var.calc_inferred_deltas (contour_points)) 1179 return false; 1180 1181 return true; 1182 } 1183 iup_optimizeOT::TupleVariationData::tuple_variations_t1184 bool iup_optimize (const contour_point_vector_t& contour_points) 1185 { 1186 for (tuple_delta_t& var : tuple_vars) 1187 { 1188 if (!var.optimize (contour_points, is_composite)) 1189 return false; 1190 } 1191 return true; 1192 } 1193 1194 public: instantiateOT::TupleVariationData::tuple_variations_t1195 bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location, 1196 const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances, 1197 contour_point_vector_t* contour_points = nullptr, 1198 bool optimize = false) 1199 { 1200 if (!tuple_vars) return true; 1201 if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances)) 1202 return false; 1203 /* compute inferred deltas only for gvar */ 1204 if (contour_points) 1205 if (!calc_inferred_deltas (*contour_points)) 1206 return false; 1207 1208 /* if iup delta opt is on, contour_points can't be null */ 1209 if (optimize && !contour_points) 1210 return false; 1211 1212 if (!merge_tuple_variations (optimize ? 
contour_points : nullptr)) 1213 return false; 1214 1215 if (optimize && !iup_optimize (*contour_points)) return false; 1216 return !tuple_vars.in_error (); 1217 } 1218 compile_bytesOT::TupleVariationData::tuple_variations_t1219 bool compile_bytes (const hb_map_t& axes_index_map, 1220 const hb_map_t& axes_old_index_tag_map, 1221 bool use_shared_points, 1222 bool is_gvar = false, 1223 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map = nullptr) 1224 { 1225 // return true for empty glyph 1226 if (!tuple_vars) 1227 return true; 1228 1229 // compile points set and store data in hashmap 1230 if (!compile_all_point_sets ()) 1231 return false; 1232 1233 /* total compiled byte size as TupleVariationData format, initialized to its 1234 * min_size: 4 */ 1235 compiled_byte_size += 4; 1236 1237 if (use_shared_points) 1238 { 1239 find_shared_points (); 1240 if (shared_points_bytes) 1241 compiled_byte_size += shared_points_bytes->length; 1242 } 1243 // compile delta and tuple var header for each tuple variation 1244 for (auto& tuple: tuple_vars) 1245 { 1246 const hb_vector_t<bool>* points_set = &(tuple.indices); 1247 hb_vector_t<char> *points_data; 1248 if (unlikely (!point_data_map.has (points_set, &points_data))) 1249 return false; 1250 1251 /* when iup optimization is enabled, num of referenced points could be 0 1252 * and thus the compiled points bytes is empty, we should skip compiling 1253 * this tuple */ 1254 if (!points_data->length) 1255 continue; 1256 if (!tuple.compile_deltas ()) 1257 return false; 1258 1259 unsigned points_data_length = (points_data != shared_points_bytes) ? points_data->length : 0; 1260 if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map, 1261 shared_tuples_idx_map)) 1262 return false; 1263 compiled_byte_size += tuple.compiled_tuple_header.length + points_data_length + tuple.compiled_deltas.length; 1264 } 1265 1266 if (is_gvar && (compiled_byte_size % 2)) 1267 { 1268 needs_padding = true; 1269 compiled_byte_size += 1; 1270 } 1271 1272 return true; 1273 } 1274 serialize_var_headersOT::TupleVariationData::tuple_variations_t1275 bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const 1276 { 1277 TRACE_SERIALIZE (this); 1278 for (const auto& tuple: tuple_vars) 1279 { 1280 tuple.compiled_tuple_header.as_array ().copy (c); 1281 if (c->in_error ()) return_trace (false); 1282 total_header_len += tuple.compiled_tuple_header.length; 1283 } 1284 return_trace (true); 1285 } 1286 serialize_var_dataOT::TupleVariationData::tuple_variations_t1287 bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const 1288 { 1289 TRACE_SERIALIZE (this); 1290 if (is_gvar && shared_points_bytes) 1291 { 1292 hb_bytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length); 1293 s.copy (c); 1294 } 1295 1296 for (const auto& tuple: tuple_vars) 1297 { 1298 const hb_vector_t<bool>* points_set = &(tuple.indices); 1299 hb_vector_t<char> *point_data; 1300 if (!point_data_map.has (points_set, &point_data)) 1301 return_trace (false); 1302 1303 if (!is_gvar || point_data != shared_points_bytes) 1304 { 1305 hb_bytes_t s (point_data->arrayZ, point_data->length); 1306 s.copy (c); 1307 } 1308 1309 tuple.compiled_deltas.as_array ().copy (c); 1310 if (c->in_error ()) return_trace (false); 1311 } 1312 1313 /* padding for gvar */ 1314 if (is_gvar && needs_padding) 1315 { 1316 HBUINT8 pad; 1317 pad = 0; 1318 if (!c->embed (pad)) return_trace (false); 1319 } 1320 return_trace (true); 1321 } 1322 }; 1323 
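  /* Rough usage sketch for tuple_variations_t (the actual call sites live in the
   * gvar/cvar instancing code, not in this header):
   *
   *   tuple_variations_t tuple_vars;
   *   // ... fill it via decompile_tuple_variations () below ...
   *   if (tuple_vars.instantiate (axes_location, axes_triple_distances,
   *                               contour_points, optimize) &&
   *       tuple_vars.compile_bytes (axes_index_map, axes_old_index_tag_map,
   *                                 use_shared_points, is_gvar))
   *     // serialize () below then writes the tuple variation headers, the shared
   *     // point numbers and the per-tuple data from the compiled bytes.
   */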
1324 struct tuple_iterator_t 1325 { get_axis_countOT::TupleVariationData::tuple_iterator_t1326 unsigned get_axis_count () const { return axis_count; } 1327 initOT::TupleVariationData::tuple_iterator_t1328 void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_) 1329 { 1330 var_data_bytes = var_data_bytes_; 1331 var_data = var_data_bytes_.as<TupleVariationData> (); 1332 index = 0; 1333 axis_count = axis_count_; 1334 current_tuple = &var_data->get_tuple_var_header (); 1335 data_offset = 0; 1336 table_base = table_base_; 1337 } 1338 get_shared_indicesOT::TupleVariationData::tuple_iterator_t1339 bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */) 1340 { 1341 if (var_data->has_shared_point_numbers ()) 1342 { 1343 const HBUINT8 *base = &(table_base+var_data->data); 1344 const HBUINT8 *p = base; 1345 if (!decompile_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false; 1346 data_offset = p - base; 1347 } 1348 return true; 1349 } 1350 is_validOT::TupleVariationData::tuple_iterator_t1351 bool is_valid () const 1352 { 1353 return (index < var_data->tupleVarCount.get_count ()) && 1354 var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) && 1355 var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (), 1356 current_tuple->get_size (axis_count))); 1357 } 1358 move_to_nextOT::TupleVariationData::tuple_iterator_t1359 bool move_to_next () 1360 { 1361 data_offset += current_tuple->get_data_size (); 1362 current_tuple = ¤t_tuple->get_next (axis_count); 1363 index++; 1364 return is_valid (); 1365 } 1366 get_serialized_dataOT::TupleVariationData::tuple_iterator_t1367 const HBUINT8 *get_serialized_data () const 1368 { return &(table_base+var_data->data) + data_offset; } 1369 1370 private: 1371 const TupleVariationData *var_data; 1372 unsigned int index; 1373 unsigned int axis_count; 1374 unsigned int data_offset; 1375 const void *table_base; 1376 1377 public: 1378 hb_bytes_t var_data_bytes; 1379 const TupleVariationHeader *current_tuple; 1380 }; 1381 get_tuple_iteratorOT::TupleVariationData1382 static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count, 1383 const void *table_base, 1384 hb_vector_t<unsigned int> &shared_indices /* OUT */, 1385 tuple_iterator_t *iterator /* OUT */) 1386 { 1387 iterator->init (var_data_bytes, axis_count, table_base); 1388 if (!iterator->get_shared_indices (shared_indices)) 1389 return false; 1390 return iterator->is_valid (); 1391 } 1392 has_shared_point_numbersOT::TupleVariationData1393 bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); } 1394 decompile_pointsOT::TupleVariationData1395 static bool decompile_points (const HBUINT8 *&p /* IN/OUT */, 1396 hb_vector_t<unsigned int> &points /* OUT */, 1397 const HBUINT8 *end) 1398 { 1399 enum packed_point_flag_t 1400 { 1401 POINTS_ARE_WORDS = 0x80, 1402 POINT_RUN_COUNT_MASK = 0x7F 1403 }; 1404 1405 if (unlikely (p + 1 > end)) return false; 1406 1407 unsigned count = *p++; 1408 if (count & POINTS_ARE_WORDS) 1409 { 1410 if (unlikely (p + 1 > end)) return false; 1411 count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++; 1412 } 1413 if (unlikely (!points.resize (count, false))) return false; 1414 1415 unsigned n = 0; 1416 unsigned i = 0; 1417 while (i < count) 1418 { 1419 if (unlikely (p + 1 > end)) return false; 1420 unsigned control = *p++; 1421 unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1; 1422 unsigned 
stop = i + run_count; 1423 if (unlikely (stop > count)) return false; 1424 if (control & POINTS_ARE_WORDS) 1425 { 1426 if (unlikely (p + run_count * HBUINT16::static_size > end)) return false; 1427 for (; i < stop; i++) 1428 { 1429 n += *(const HBUINT16 *)p; 1430 points.arrayZ[i] = n; 1431 p += HBUINT16::static_size; 1432 } 1433 } 1434 else 1435 { 1436 if (unlikely (p + run_count > end)) return false; 1437 for (; i < stop; i++) 1438 { 1439 n += *p++; 1440 points.arrayZ[i] = n; 1441 } 1442 } 1443 } 1444 return true; 1445 } 1446 1447 template <typename T> decompile_deltasOT::TupleVariationData1448 static bool decompile_deltas (const HBUINT8 *&p /* IN/OUT */, 1449 hb_vector_t<T> &deltas /* IN/OUT */, 1450 const HBUINT8 *end, 1451 bool consume_all = false) 1452 { 1453 return TupleValues::decompile (p, deltas, end, consume_all); 1454 } 1455 has_dataOT::TupleVariationData1456 bool has_data () const { return tupleVarCount; } 1457 decompile_tuple_variationsOT::TupleVariationData1458 bool decompile_tuple_variations (unsigned point_count, 1459 bool is_gvar, 1460 tuple_iterator_t iterator, 1461 const hb_map_t *axes_old_index_tag_map, 1462 const hb_vector_t<unsigned> &shared_indices, 1463 const hb_array_t<const F2DOT14> shared_tuples, 1464 tuple_variations_t& tuple_variations, /* OUT */ 1465 bool is_composite_glyph = false) const 1466 { 1467 return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount, 1468 point_count, is_gvar, 1469 axes_old_index_tag_map, 1470 shared_indices, 1471 shared_tuples, 1472 is_composite_glyph); 1473 } 1474 serializeOT::TupleVariationData1475 bool serialize (hb_serialize_context_t *c, 1476 bool is_gvar, 1477 const tuple_variations_t& tuple_variations) const 1478 { 1479 TRACE_SERIALIZE (this); 1480 /* empty tuple variations, just return and skip serialization. */ 1481 if (!tuple_variations) return_trace (true); 1482 1483 auto *out = c->start_embed (this); 1484 if (unlikely (!c->extend_min (out))) return_trace (false); 1485 1486 if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (), 1487 HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false); 1488 1489 unsigned total_header_len = 0; 1490 1491 if (!tuple_variations.serialize_var_headers (c, total_header_len)) 1492 return_trace (false); 1493 1494 unsigned data_offset = min_size + total_header_len; 1495 if (!is_gvar) data_offset += 4; 1496 if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false); 1497 1498 return tuple_variations.serialize_var_data (c, is_gvar); 1499 } 1500 1501 protected: 1502 struct TupleVarCount : HBUINT16 1503 { 1504 friend struct tuple_variations_t; has_shared_point_numbersOT::TupleVariationData::TupleVarCount1505 bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); } get_countOT::TupleVariationData::TupleVarCount1506 unsigned int get_count () const { return (*this) & CountMask; } operator =OT::TupleVariationData::TupleVarCount1507 TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; } operator boolOT::TupleVariationData::TupleVarCount1508 explicit operator bool () const { return get_count (); } 1509 1510 protected: 1511 enum Flags 1512 { 1513 SharedPointNumbers= 0x8000u, 1514 CountMask = 0x0FFFu 1515 }; 1516 public: 1517 DEFINE_SIZE_STATIC (2); 1518 }; 1519 1520 TupleVarCount tupleVarCount; /* A packed field. The high 4 bits are flags, and the 1521 * low 12 bits are the number of tuple variation tables 1522 * for this glyph. 
The number of tuple variation tables 1523 * can be any number between 1 and 4095. */ 1524 Offset16To<HBUINT8> 1525 data; /* Offset from the start of the base table 1526 * to the serialized data. */ 1527 /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */ 1528 public: 1529 DEFINE_SIZE_MIN (4); 1530 }; 1531 1532 using tuple_variations_t = TupleVariationData::tuple_variations_t; 1533 struct item_variations_t 1534 { 1535 using region_t = const hb_hashmap_t<hb_tag_t, Triple>*; 1536 private: 1537 /* each subtable is decompiled into a tuple_variations_t, in which all tuples 1538 * have the same num of deltas (rows) */ 1539 hb_vector_t<tuple_variations_t> vars; 1540 1541 /* num of retained rows for each subtable, there're 2 cases when var_data is empty: 1542 * 1. retained item_count is zero 1543 * 2. regions is empty and item_count is non-zero. 1544 * when converting to tuples, both will be dropped because the tuple is empty, 1545 * however, we need to retain 2. as all-zero rows to keep original varidx 1546 * valid, so we need a way to remember the num of rows for each subtable */ 1547 hb_vector_t<unsigned> var_data_num_rows; 1548 1549 /* original region list, decompiled from item varstore, used when rebuilding 1550 * region list after instantiation */ 1551 hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>> orig_region_list; 1552 1553 /* region list: vector of Regions, maintain the original order for the regions 1554 * that existed before instantiate (), append the new regions at the end. 1555 * Regions are stored in each tuple already, save pointers only. 1556 * When converting back to item varstore, unused regions will be pruned */ 1557 hb_vector_t<region_t> region_list; 1558 1559 /* region -> idx map after instantiation and pruning unused regions */ 1560 hb_hashmap_t<region_t, unsigned> region_map; 1561 1562 /* all delta rows after instantiation */ 1563 hb_vector_t<hb_vector_t<int>> delta_rows; 1564 /* final optimized vector of encoding objects used to assemble the varstore */ 1565 hb_vector_t<delta_row_encoding_t> encodings; 1566 1567 /* old varidxes -> new var_idxes map */ 1568 hb_map_t varidx_map; 1569 1570 /* has long words */ 1571 bool has_long = false; 1572 1573 public: has_long_wordOT::item_variations_t1574 bool has_long_word () const 1575 { return has_long; } 1576 get_region_listOT::item_variations_t1577 const hb_vector_t<region_t>& get_region_list () const 1578 { return region_list; } 1579 get_vardata_encodingsOT::item_variations_t1580 const hb_vector_t<delta_row_encoding_t>& get_vardata_encodings () const 1581 { return encodings; } 1582 get_varidx_mapOT::item_variations_t1583 const hb_map_t& get_varidx_map () const 1584 { return varidx_map; } 1585 instantiateOT::item_variations_t1586 bool instantiate (const ItemVariationStore& varStore, 1587 const hb_subset_plan_t *plan, 1588 bool optimize=true, 1589 bool use_no_variation_idx=true, 1590 const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ()) 1591 { 1592 if (!create_from_item_varstore (varStore, plan->axes_old_index_tag_map, inner_maps)) 1593 return false; 1594 if (!instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances)) 1595 return false; 1596 return as_item_varstore (optimize, use_no_variation_idx); 1597 } 1598 1599 /* keep below APIs public only for unit test: test-item-varstore */ create_from_item_varstoreOT::item_variations_t1600 bool create_from_item_varstore (const ItemVariationStore& varStore, 1601 const hb_map_t& axes_old_index_tag_map, 1602 
const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ()) 1603 { 1604 const VarRegionList& regionList = varStore.get_region_list (); 1605 if (!regionList.get_var_regions (axes_old_index_tag_map, orig_region_list)) 1606 return false; 1607 1608 unsigned num_var_data = varStore.get_sub_table_count (); 1609 if (inner_maps && inner_maps.length != num_var_data) return false; 1610 if (!vars.alloc (num_var_data) || 1611 !var_data_num_rows.alloc (num_var_data)) return false; 1612 1613 for (unsigned i = 0; i < num_var_data; i++) 1614 { 1615 if (inner_maps && !inner_maps.arrayZ[i].get_population ()) 1616 continue; 1617 tuple_variations_t var_data_tuples; 1618 unsigned item_count = 0; 1619 if (!var_data_tuples.create_from_item_var_data (varStore.get_sub_table (i), 1620 orig_region_list, 1621 axes_old_index_tag_map, 1622 item_count, 1623 inner_maps ? &(inner_maps.arrayZ[i]) : nullptr)) 1624 return false; 1625 1626 var_data_num_rows.push (item_count); 1627 vars.push (std::move (var_data_tuples)); 1628 } 1629 return !vars.in_error () && !var_data_num_rows.in_error () && vars.length == var_data_num_rows.length; 1630 } 1631 instantiate_tuple_varsOT::item_variations_t1632 bool instantiate_tuple_vars (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location, 1633 const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances) 1634 { 1635 for (tuple_variations_t& tuple_vars : vars) 1636 if (!tuple_vars.instantiate (normalized_axes_location, axes_triple_distances)) 1637 return false; 1638 1639 if (!build_region_list ()) return false; 1640 return true; 1641 } 1642 build_region_listOT::item_variations_t1643 bool build_region_list () 1644 { 1645 /* scan all tuples and collect all unique regions, prune unused regions */ 1646 hb_hashmap_t<region_t, unsigned> all_regions; 1647 hb_hashmap_t<region_t, unsigned> used_regions; 1648 1649 /* use a vector when inserting new regions, make result deterministic */ 1650 hb_vector_t<region_t> all_unique_regions; 1651 for (const tuple_variations_t& sub_table : vars) 1652 { 1653 for (const tuple_delta_t& tuple : sub_table.tuple_vars) 1654 { 1655 region_t r = &(tuple.axis_tuples); 1656 if (!used_regions.has (r)) 1657 { 1658 bool all_zeros = true; 1659 for (float d : tuple.deltas_x) 1660 { 1661 int delta = (int) roundf (d); 1662 if (delta != 0) 1663 { 1664 all_zeros = false; 1665 break; 1666 } 1667 } 1668 if (!all_zeros) 1669 { 1670 if (!used_regions.set (r, 1)) 1671 return false; 1672 } 1673 } 1674 if (all_regions.has (r)) 1675 continue; 1676 if (!all_regions.set (r, 1)) 1677 return false; 1678 all_unique_regions.push (r); 1679 } 1680 } 1681 1682 /* regions are empty means no variation data, return true */ 1683 if (!all_regions || !all_unique_regions) return true; 1684 1685 if (!region_list.alloc (all_regions.get_population ())) 1686 return false; 1687 1688 unsigned idx = 0; 1689 /* append the original regions that pre-existed */ 1690 for (const auto& r : orig_region_list) 1691 { 1692 if (!all_regions.has (&r) || !used_regions.has (&r)) 1693 continue; 1694 1695 region_list.push (&r); 1696 if (!region_map.set (&r, idx)) 1697 return false; 1698 all_regions.del (&r); 1699 idx++; 1700 } 1701 1702 /* append the new regions at the end */ 1703 for (const auto& r: all_unique_regions) 1704 { 1705 if (!all_regions.has (r) || !used_regions.has (r)) 1706 continue; 1707 region_list.push (r); 1708 if (!region_map.set (r, idx)) 1709 return false; 1710 all_regions.del (r); 1711 idx++; 1712 } 1713 return (!region_list.in_error ()) && 
  /* main algorithm ported from fonttools VarStore_optimize() method, optimize
   * varstore by default */

  struct combined_gain_idx_tuple_t
  {
    int gain;
    unsigned idx_1;
    unsigned idx_2;

    combined_gain_idx_tuple_t () = default;
    combined_gain_idx_tuple_t (int gain_, unsigned i, unsigned j)
      : gain (gain_), idx_1 (i), idx_2 (j) {}

    bool operator < (const combined_gain_idx_tuple_t& o)
    {
      if (gain != o.gain)
        return gain < o.gain;

      if (idx_1 != o.idx_1)
        return idx_1 < o.idx_1;

      return idx_2 < o.idx_2;
    }

    bool operator <= (const combined_gain_idx_tuple_t& o)
    {
      if (*this < o) return true;
      return gain == o.gain && idx_1 == o.idx_1 && idx_2 == o.idx_2;
    }
  };
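  /* Explanatory note on the ordering above, mirroring what as_item_varstore ()
   * does below: hb_priority_queue_t pops its minimum element, and candidate
   * merges are inserted with a *negated* gain, so the pair of encodings whose
   * merge saves the most bytes compares smallest and is popped first.  The
   * idx_1/idx_2 comparisons break ties between merges of equal gain, keeping
   * the merge order (and hence the final varstore) deterministic. */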
  bool as_item_varstore (bool optimize=true, bool use_no_variation_idx=true)
  {
    /* return true if there is no variation data */
    if (!region_list) return true;
    unsigned num_cols = region_list.length;
    /* pre-allocate a 2D vector for all subtables' VarData rows */
    unsigned total_rows = 0;
    for (unsigned major = 0; major < var_data_num_rows.length; major++)
      total_rows += var_data_num_rows[major];

    if (!delta_rows.resize (total_rows)) return false;
    /* init all rows to [0]*num_cols */
    for (unsigned i = 0; i < total_rows; i++)
      if (!(delta_rows[i].resize (num_cols))) return false;

    /* old VarIdxes -> full encoding_row mapping */
    hb_hashmap_t<unsigned, const hb_vector_t<int>*> front_mapping;
    unsigned start_row = 0;
    hb_vector_t<delta_row_encoding_t> encoding_objs;
    hb_hashmap_t<hb_vector_t<uint8_t>, unsigned> chars_idx_map;

    /* delta_rows map, used for filtering out duplicate rows */
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> delta_rows_map;
    for (unsigned major = 0; major < vars.length; major++)
    {
      /* deltas are stored per tuple (column-based); convert them back into
       * per-item (row-based) deltas */
      const tuple_variations_t& tuples = vars[major];
      unsigned num_rows = var_data_num_rows[major];
      for (const tuple_delta_t& tuple: tuples.tuple_vars)
      {
        if (tuple.deltas_x.length != num_rows)
          return false;

        /* skip unused regions */
        unsigned *col_idx;
        if (!region_map.has (&(tuple.axis_tuples), &col_idx))
          continue;

        for (unsigned i = 0; i < num_rows; i++)
        {
          int rounded_delta = roundf (tuple.deltas_x[i]);
          delta_rows[start_row + i][*col_idx] += rounded_delta;
          if ((!has_long) && (rounded_delta < -65536 || rounded_delta > 65535))
            has_long = true;
        }
      }

      if (!optimize)
      {
        /* assemble a delta_row_encoding_t for this subtable; optimization is
         * skipped, so chars is not initialized: we only need delta rows for
         * serialization */
        delta_row_encoding_t obj;
        for (unsigned r = start_row; r < start_row + num_rows; r++)
          obj.add_row (&(delta_rows.arrayZ[r]));

        encodings.push (std::move (obj));
        start_row += num_rows;
        continue;
      }

      for (unsigned minor = 0; minor < num_rows; minor++)
      {
        const hb_vector_t<int>& row = delta_rows[start_row + minor];
        if (use_no_variation_idx)
        {
          bool all_zeros = true;
          for (int delta : row)
          {
            if (delta != 0)
            {
              all_zeros = false;
              break;
            }
          }
          if (all_zeros)
            continue;
        }

        if (!front_mapping.set ((major<<16) + minor, &row))
          return false;

        hb_vector_t<uint8_t> chars = delta_row_encoding_t::get_row_chars (row);
        if (!chars) return false;

        if (delta_rows_map.has (&row))
          continue;

        delta_rows_map.set (&row, 1);
        unsigned *obj_idx;
        if (chars_idx_map.has (chars, &obj_idx))
        {
          delta_row_encoding_t& obj = encoding_objs[*obj_idx];
          if (!obj.add_row (&row))
            return false;
        }
        else
        {
          if (!chars_idx_map.set (chars, encoding_objs.length))
            return false;
          delta_row_encoding_t obj (std::move (chars), &row);
          encoding_objs.push (std::move (obj));
        }
      }

      start_row += num_rows;
    }

    /* return directly if not optimizing: the original VariationIndex values
     * are maintained, so varidx_map stays empty */
    if (!optimize) return !encodings.in_error ();

    /* sort encoding_objs */
    encoding_objs.qsort ();

    /* main algorithm: repeatedly pick the 2 best encodings to combine, and
     * combine them */
    hb_priority_queue_t<combined_gain_idx_tuple_t> queue;
    unsigned num_todos = encoding_objs.length;
    for (unsigned i = 0; i < num_todos; i++)
    {
      for (unsigned j = i + 1; j < num_todos; j++)
      {
        int combining_gain = encoding_objs.arrayZ[i].gain_from_merging (encoding_objs.arrayZ[j]);
        if (combining_gain > 0)
          queue.insert (combined_gain_idx_tuple_t (-combining_gain, i, j), 0);
      }
    }

    hb_set_t removed_todo_idxes;
    while (queue)
    {
      auto t = queue.pop_minimum ().first;
      unsigned i = t.idx_1;
      unsigned j = t.idx_2;

      if (removed_todo_idxes.has (i) || removed_todo_idxes.has (j))
        continue;

      delta_row_encoding_t& encoding = encoding_objs.arrayZ[i];
      delta_row_encoding_t& other_encoding = encoding_objs.arrayZ[j];

      removed_todo_idxes.add (i);
      removed_todo_idxes.add (j);

      hb_vector_t<uint8_t> combined_chars;
      if (!combined_chars.alloc (encoding.chars.length))
        return false;

      for (unsigned idx = 0; idx < encoding.chars.length; idx++)
      {
        uint8_t v = hb_max (encoding.chars.arrayZ[idx], other_encoding.chars.arrayZ[idx]);
        combined_chars.push (v);
      }

      delta_row_encoding_t combined_encoding_obj (std::move (combined_chars));
      for (const auto& row : hb_concat (encoding.items, other_encoding.items))
        combined_encoding_obj.add_row (row);

      for (unsigned idx = 0; idx < encoding_objs.length; idx++)
      {
        if (removed_todo_idxes.has (idx)) continue;

        const delta_row_encoding_t& obj = encoding_objs.arrayZ[idx];
        /* compare against the combined object's chars: combined_chars itself
         * was moved into combined_encoding_obj above */
        if (obj.chars == combined_encoding_obj.chars)
        {
          for (const auto& row : obj.items)
            combined_encoding_obj.add_row (row);

          removed_todo_idxes.add (idx);
          continue;
        }

        int combined_gain = combined_encoding_obj.gain_from_merging (obj);
        if (combined_gain > 0)
          queue.insert (combined_gain_idx_tuple_t (-combined_gain, idx, encoding_objs.length), 0);
      }

      encoding_objs.push (std::move (combined_encoding_obj));
    }

    int num_final_encodings = (int) encoding_objs.length - (int) removed_todo_idxes.get_population ();
    if (num_final_encodings <= 0) return false;

    if (!encodings.alloc (num_final_encodings)) return false;
    for (unsigned i = 0; i < encoding_objs.length; i++)
    {
      if (removed_todo_idxes.has (i)) continue;
      encodings.push (std::move (encoding_objs.arrayZ[i]));
    }

    /* sort again based on width, to make the result deterministic */
    encodings.qsort (delta_row_encoding_t::cmp_width);

    return compile_varidx_map (front_mapping);
  }
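  /* Illustrative note on variation-index packing (example values assumed):
   * a VarIdx is (major << 16) + minor, where major selects the VarData
   * subtable and minor the row within it.  front_mapping above keys delta
   * rows by their old VarIdx; compile_varidx_map () below re-keys them by the
   * encoding each row ended up in, so e.g. an old index 0x00020005
   * (subtable 2, row 5) may become 0x00000013 (encoding 0, row 19), or
   * HB_OT_LAYOUT_NO_VARIATIONS_INDEX if the row was dropped as all-zero. */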
  private:
  /* compile the old varidx -> new varidx mapping once the final encodings are
   * built: the new major is the index of the encoding a row belongs to, and
   * the new minor is the row's index within that encoding */
  bool compile_varidx_map (const hb_hashmap_t<unsigned, const hb_vector_t<int>*>& front_mapping)
  {
    /* full encoding_row -> new VarIdx mapping */
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> back_mapping;

    for (unsigned major = 0; major < encodings.length; major++)
    {
      delta_row_encoding_t& encoding = encodings[major];
      /* just a sanity check, this shouldn't happen */
      if (encoding.is_empty ())
        return false;

      unsigned num_rows = encoding.items.length;

      /* sort rows to make the result deterministic */
      encoding.items.qsort (_cmp_row);

      /* compile the old -> new varidx mapping */
      for (unsigned minor = 0; minor < num_rows; minor++)
      {
        unsigned new_varidx = (major << 16) + minor;
        back_mapping.set (encoding.items.arrayZ[minor], new_varidx);
      }
    }

    for (auto _ : front_mapping.iter ())
    {
      unsigned old_varidx = _.first;
      unsigned *new_varidx;
      if (back_mapping.has (_.second, &new_varidx))
        varidx_map.set (old_varidx, *new_varidx);
      else
        varidx_map.set (old_varidx, HB_OT_LAYOUT_NO_VARIATIONS_INDEX);
    }
    return !varidx_map.in_error ();
  }

  static int _cmp_row (const void *pa, const void *pb)
  {
    /* compare the rows pointed at (const hb_vector_t<int>*), element by element */
    const hb_vector_t<int>** a = (const hb_vector_t<int>**) pa;
    const hb_vector_t<int>** b = (const hb_vector_t<int>**) pb;

    for (unsigned i = 0; i < (*b)->length; i++)
    {
      int va = (*a)->arrayZ[i];
      int vb = (*b)->arrayZ[i];
      if (va != vb)
        return va < vb ? -1 : 1;
    }
    return 0;
  }
};


} /* namespace OT */


#endif /* HB_OT_VAR_COMMON_HH */