/*
 * Copyright © 2021  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"
#include "hb-priority-queue.hh"


namespace OT {

template <typename MapCountT>
struct DeltaSetIndexMapFormat01
{
  friend struct DeltaSetIndexMap;

  unsigned get_size () const
  { return min_size + mapCount * get_width (); }

  private:
  DeltaSetIndexMapFormat01* copy (hb_serialize_context_t *c) const
  {
    TRACE_SERIALIZE (this);
    return_trace (c->embed (this));
  }

  template <typename T>
  bool serialize (hb_serialize_context_t *c, const T &plan)
  {
    unsigned int width = plan.get_width ();
    unsigned int inner_bit_count = plan.get_inner_bit_count ();
    const hb_array_t<const uint32_t> output_map = plan.get_output_map ();

    TRACE_SERIALIZE (this);
    if (unlikely (output_map.length && ((((inner_bit_count-1)&~0xF)!=0) || (((width-1)&~0x3)!=0))))
      return_trace (false);
    if (unlikely (!c->extend_min (this))) return_trace (false);

    entryFormat = ((width-1)<<4)|(inner_bit_count-1);
    mapCount = output_map.length;
    HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length);
    if (unlikely (!p)) return_trace (false);
    for (unsigned int i = 0; i < output_map.length; i++)
    {
      unsigned int v = output_map.arrayZ[i];
      if (v)
      {
        unsigned int outer = v >> 16;
        unsigned int inner = v & 0xFFFF;
        unsigned int u = (outer << inner_bit_count) | inner;
        for (unsigned int w = width; w > 0;)
        {
          p[--w] = u;
          u >>= 8;
        }
      }
      p += width;
    }
    return_trace (true);
  }

  uint32_t map (unsigned int v) const /* Returns 16.16 outer.inner. */
  {
    /* If count is zero, pass value unchanged.  This takes
     * care of direct mapping for advance map. */
    if (!mapCount)
      return v;

    if (v >= mapCount)
      v = mapCount - 1;

    unsigned int u = 0;
    { /* Fetch it. */
      unsigned int w = get_width ();
      const HBUINT8 *p = mapDataZ.arrayZ + w * v;
      for (; w; w--)
        u = (u << 8) + *p++;
    }

    { /* Repack it. */
      unsigned int n = get_inner_bit_count ();
      unsigned int outer = u >> n;
      unsigned int inner = u & ((1 << n) - 1);
      u = (outer<<16) | inner;
    }

    return u;
  }

  unsigned get_map_count () const       { return mapCount; }
  unsigned get_width () const           { return ((entryFormat >> 4) & 3) + 1; }
  unsigned get_inner_bit_count () const { return (entryFormat & 0xF) + 1; }


  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) &&
                  c->check_range (mapDataZ.arrayZ,
                                  mapCount,
                                  get_width ()));
  }

  protected:
  HBUINT8       format;         /* Format identifier--format = 0 */
  HBUINT8       entryFormat;    /* A packed field that describes the compressed
                                 * representation of delta-set indices. */
  MapCountT     mapCount;       /* The number of mapping entries. */
  UnsizedArrayOf<HBUINT8>
                mapDataZ;       /* The delta-set index mapping data. */

  public:
  DEFINE_SIZE_ARRAY (2+MapCountT::static_size, mapDataZ);
};
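
/* A worked example of the entryFormat packing above (illustrative values,
 * not from any particular font): with width = 2 bytes and
 * inner_bit_count = 12, entryFormat = ((2-1)<<4) | (12-1) = 0x1B.
 * get_width () then yields ((0x1B >> 4) & 3) + 1 = 2 and
 * get_inner_bit_count () yields (0x1B & 0xF) + 1 = 12.  A stored 16-bit
 * entry u = 0x1234 unpacks to outer = u >> 12 = 1 and
 * inner = u & 0xFFF = 0x234, so map () returns (1 << 16) | 0x234 = 0x10234. */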

struct DeltaSetIndexMap
{
  template <typename T>
  bool serialize (hb_serialize_context_t *c, const T &plan)
  {
    TRACE_SERIALIZE (this);
    unsigned length = plan.get_output_map ().length;
    u.format = length <= 0xFFFF ? 0 : 1;
    switch (u.format) {
    case 0: return_trace (u.format0.serialize (c, plan));
    case 1: return_trace (u.format1.serialize (c, plan));
    default:return_trace (false);
    }
  }

  uint32_t map (unsigned v) const
  {
    switch (u.format) {
    case 0: return (u.format0.map (v));
    case 1: return (u.format1.map (v));
    default:return v;
    }
  }

  unsigned get_map_count () const
  {
    switch (u.format) {
    case 0: return u.format0.get_map_count ();
    case 1: return u.format1.get_map_count ();
    default:return 0;
    }
  }

  unsigned get_width () const
  {
    switch (u.format) {
    case 0: return u.format0.get_width ();
    case 1: return u.format1.get_width ();
    default:return 0;
    }
  }

  unsigned get_inner_bit_count () const
  {
    switch (u.format) {
    case 0: return u.format0.get_inner_bit_count ();
    case 1: return u.format1.get_inner_bit_count ();
    default:return 0;
    }
  }

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    if (!u.format.sanitize (c)) return_trace (false);
    switch (u.format) {
    case 0: return_trace (u.format0.sanitize (c));
    case 1: return_trace (u.format1.sanitize (c));
    default:return_trace (true);
    }
  }

  DeltaSetIndexMap* copy (hb_serialize_context_t *c) const
  {
    TRACE_SERIALIZE (this);
    switch (u.format) {
    case 0: return_trace (reinterpret_cast<DeltaSetIndexMap *> (u.format0.copy (c)));
    case 1: return_trace (reinterpret_cast<DeltaSetIndexMap *> (u.format1.copy (c)));
    default:return_trace (nullptr);
    }
  }

  protected:
  union {
  HBUINT8                            format;         /* Format identifier */
  DeltaSetIndexMapFormat01<HBUINT16> format0;
  DeltaSetIndexMapFormat01<HBUINT32> format1;
  } u;
  public:
  DEFINE_SIZE_UNION (1, format);
};
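
/* Usage sketch (hypothetical caller; names are illustrative only): the
 * wrapper above dispatches on u.format, so callers work with plain 32-bit
 * VarIdx values:
 *
 *   uint32_t var_idx = delta_set_index_map.map (old_idx);
 *   unsigned outer = var_idx >> 16;     // VarData subtable index
 *   unsigned inner = var_idx & 0xFFFF;  // row within that subtable
 *
 * serialize () picks format 0 (16-bit mapCount) whenever the output map has
 * at most 0xFFFF entries, and falls back to format 1 (32-bit) otherwise. */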


struct VarStoreInstancer
{
  VarStoreInstancer (const VariationStore *varStore,
                     const DeltaSetIndexMap *varIdxMap,
                     hb_array_t<int> coords) :
    varStore (varStore), varIdxMap (varIdxMap), coords (coords) {}

  operator bool () const { return varStore && bool (coords); }

  /* According to the spec, if the COLR table has a varStore but no
   * varIdxMap, an implicit identity mapping is used. */
  float operator() (uint32_t varIdx, unsigned short offset = 0) const
  { return coords ? varStore->get_delta (varIdxMap ? varIdxMap->map (VarIdx::add (varIdx, offset)) : varIdx + offset, coords) : 0; }

  const VariationStore *varStore;
  const DeltaSetIndexMap *varIdxMap;
  hb_array_t<int> coords;
};
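
/* Usage sketch (hypothetical values): given an instancer built from a COLR
 * VariationStore, a delta for a variable field can be fetched as
 *
 *   VarStoreInstancer instancer (varStore, varIdxMap, coords);
 *   float delta = instancer (base_var_idx, 1);  // offset 1 past base VarIdx
 *
 * With a null varIdxMap the VarIdx is used directly (identity mapping), and
 * with empty coords the result is 0, i.e. the default instance. */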

/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count).arrayZ;
      end_tuple = get_end_tuple (axis_count).arrayZ;
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple (start, peak, end));
    }

    return true;
  }

  float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
                          const hb_array_t<const F2DOT14> shared_tuples,
                          const hb_vector_t<hb_pair_t<int,int>> *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;
    unsigned step = 1;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.f;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        if (unlikely (index >= shared_tuple_active_idx->length))
          return 0.f;
        auto _ = (*shared_tuple_active_idx).arrayZ[index];
        if (_.second != -1)
        {
          start_idx = _.first;
          end_idx = _.second + 1;
          step = _.second - _.first;
        }
        else if (_.first != -1)
        {
          start_idx = _.first;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    float scalar = 1.f;
    for (unsigned int i = start_idx; i < end_idx; i += step)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.f;
        if (v < peak)
        { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
      else
        scalar *= (float) v / peak;
    }
    return scalar;
  }

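  /* A worked example of the scalar math above (hypothetical, with F2DOT14
   * values written as floats): for a non-intermediate tuple with peak 0.5
   * on a single axis, an instance at coord 0.25 contributes
   * scalar = v / peak = 0.5, and coord 0.5 contributes 1.0.  Any coord
   * outside (0, 0.5], e.g. -0.25 or 0.75, zeroes the whole tuple scalar,
   * so its deltas are skipped entirely. */
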
  bool           has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool   has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned      get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                   The low 12 bits are an index into a shared tuple
                                   records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                                /* Peak tuple record for this tuple variation table - optional,
                                 * determined by flags in the tupleIndex value.
                                 *
                                 * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                                /* Intermediate start tuple record for this tuple variation table - optional,
                                   determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                                /* Intermediate end tuple record for this tuple variation table - optional,
                                 * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

enum packed_delta_flag_t
{
  DELTAS_ARE_ZERO      = 0x80,
  DELTAS_ARE_WORDS     = 0x40,
  DELTA_RUN_COUNT_MASK = 0x3F
};
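
/* A worked example of the run encoding driven by these flags (hypothetical
 * delta stream): the deltas {0, 0, 0, 300, 1, -1} compile to
 *
 *   0x82             DELTAS_ARE_ZERO  | (3 - 1): three zero deltas
 *   0x40 0x01 0x2C   DELTAS_ARE_WORDS | (1 - 1): one 16-bit delta, 300
 *   0x01 0x01 0xFF   a run of two byte-sized deltas: 1, -1
 *
 * encode_delta_run () in tuple_delta_t below picks whichever of the three
 * run types is cheapest at each position. */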

struct tuple_delta_t
{
  public:
  hb_hashmap_t<hb_tag_t, Triple> axis_tuples;

  /* indices_length = point_count; indices[i] = true means point i is referenced */
  hb_vector_t<bool> indices;

  hb_vector_t<float> deltas_x;
  /* empty for cvar tuples */
  hb_vector_t<float> deltas_y;

  /* compiled data: header and deltas.
   * Compiled point data is saved in a hashmap within tuple_variations_t,
   * because some point sets might be reused by different tuple variations. */
  hb_vector_t<char> compiled_tuple_header;
  hb_vector_t<char> compiled_deltas;

  /* compiled peak coords, empty for non-gvar tuples */
  hb_vector_t<char> compiled_peak_coords;

  tuple_delta_t () = default;
  tuple_delta_t (const tuple_delta_t& o) = default;

  friend void swap (tuple_delta_t& a, tuple_delta_t& b)
  {
    hb_swap (a.axis_tuples, b.axis_tuples);
    hb_swap (a.indices, b.indices);
    hb_swap (a.deltas_x, b.deltas_x);
    hb_swap (a.deltas_y, b.deltas_y);
    hb_swap (a.compiled_tuple_header, b.compiled_tuple_header);
    hb_swap (a.compiled_deltas, b.compiled_deltas);
    hb_swap (a.compiled_peak_coords, b.compiled_peak_coords);
  }

  tuple_delta_t (tuple_delta_t&& o) : tuple_delta_t ()
  { hb_swap (*this, o); }

  tuple_delta_t& operator = (tuple_delta_t&& o)
  {
    hb_swap (*this, o);
    return *this;
  }

  void remove_axis (hb_tag_t axis_tag)
  { axis_tuples.del (axis_tag); }

  bool set_tent (hb_tag_t axis_tag, Triple tent)
  { return axis_tuples.set (axis_tag, tent); }

  tuple_delta_t& operator += (const tuple_delta_t& o)
  {
    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (indices.arrayZ[i])
      {
        if (o.indices.arrayZ[i])
        {
          deltas_x[i] += o.deltas_x[i];
          if (deltas_y && o.deltas_y)
            deltas_y[i] += o.deltas_y[i];
        }
      }
      else
      {
        if (!o.indices.arrayZ[i]) continue;
        indices.arrayZ[i] = true;
        deltas_x[i] = o.deltas_x[i];
        if (deltas_y && o.deltas_y)
          deltas_y[i] = o.deltas_y[i];
      }
    }
    return *this;
  }

  tuple_delta_t& operator *= (float scalar)
  {
    if (scalar == 1.0f)
      return *this;

    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (!indices.arrayZ[i]) continue;

      deltas_x[i] *= scalar;
      if (deltas_y)
        deltas_y[i] *= scalar;
    }
    return *this;
  }

  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
                                                          TripleDistances axis_triple_distances) const
  {
    hb_vector_t<tuple_delta_t> out;
    Triple *tent;
    if (!axis_tuples.has (axis_tag, &tent))
    {
      out.push (*this);
      return out;
    }

    if ((tent->minimum < 0.f && tent->maximum > 0.f) ||
        !(tent->minimum <= tent->middle && tent->middle <= tent->maximum))
      return out;

    if (tent->middle == 0.f)
    {
      out.push (*this);
      return out;
    }

    result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
    for (auto t : solutions)
    {
      tuple_delta_t new_var = *this;
      if (t.second == Triple ())
        new_var.remove_axis (axis_tag);
      else
        new_var.set_tent (axis_tag, t.second);

      new_var *= t.first;
      out.push (std::move (new_var));
    }

    return out;
  }

  bool compile_peak_coords (const hb_map_t& axes_index_map,
                            const hb_map_t& axes_old_index_tag_map)
  {
    unsigned axis_count = axes_index_map.get_population ();
    if (unlikely (!compiled_peak_coords.alloc (axis_count * F2DOT14::static_size)))
      return false;

    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i))
        continue;

      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      F2DOT14 peak_coord;
      if (axis_tuples.has (axis_tag, &coords))
        peak_coord.set_float (coords->middle);
      else
        peak_coord.set_int (0);

      /* push the F2DOT14 value into the char vector */
      int16_t val = peak_coord.to_int ();
      compiled_peak_coords.push (static_cast<char> (val >> 8));
      compiled_peak_coords.push (static_cast<char> (val & 0xFF));
    }

    return !compiled_peak_coords.in_error ();
  }

  /* Deltas have to be compiled before we compile the tuple variation
   * header, because we need to fill in the size of the serialized
   * data for this tuple variation. */
  bool compile_tuple_var_header (const hb_map_t& axes_index_map,
                                 unsigned points_data_length,
                                 const hb_map_t& axes_old_index_tag_map,
                                 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map)
  {
    if (!compiled_deltas) return false;

    unsigned cur_axis_count = axes_index_map.get_population ();
    /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */
    unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4;
    if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false;

    unsigned flag = 0;
    /* skip the first 4 header bytes: variationDataSize + tupleIndex */
    F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4);
    F2DOT14* end = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ());
    hb_array_t<F2DOT14> coords (p, end - p);

    /* encode peak coords */
    unsigned peak_count = 0;
    unsigned *shared_tuple_idx;
    if (shared_tuples_idx_map &&
        shared_tuples_idx_map->has (&compiled_peak_coords, &shared_tuple_idx))
    {
      flag = *shared_tuple_idx;
    }
    else
    {
      peak_count = encode_peak_coords (coords, flag, axes_index_map, axes_old_index_tag_map);
      if (!peak_count) return false;
    }

    /* encode intermediate coords; they are optional, so the returned count may be 0 */
    unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map);

    /* a point-data length of 0 implies "use shared points" */
    if (points_data_length)
      flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers;

    unsigned serialized_data_size = points_data_length + compiled_deltas.length;
    TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ());
    o->varDataSize = serialized_data_size;
    o->tupleIndex = flag;

    unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size);
    return compiled_tuple_header.resize (total_header_len);
  }

  unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords,
                               unsigned& flag,
                               const hb_map_t& axes_index_map,
                               const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    auto it = peak_coords.iter ();
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      if (!axis_tuples.has (axis_tag, &coords))
        (*it).set_int (0);
      else
        (*it).set_float (coords->middle);
      it++;
      count++;
    }
    flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple;
    return count;
  }

  /* Returns the number of encoded coords; returns 0 when the intermediate
   * coords need not be encoded. */
  unsigned encode_interm_coords (hb_array_t<F2DOT14> coords,
                                 unsigned& flag,
                                 const hb_map_t& axes_index_map,
                                 const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    unsigned cur_axis_count = axes_index_map.get_population ();

    auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter ();
    auto end_coords_iter = coords.sub_array (cur_axis_count).iter ();
    bool encode_needed = false;
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      float min_val = 0.f, val = 0.f, max_val = 0.f;
      if (axis_tuples.has (axis_tag, &coords))
      {
        min_val = coords->minimum;
        val = coords->middle;
        max_val = coords->maximum;
      }

      (*start_coords_iter).set_float (min_val);
      (*end_coords_iter).set_float (max_val);

      start_coords_iter++;
      end_coords_iter++;
      count += 2;
      if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f))
        encode_needed = true;
    }

    if (encode_needed)
    {
      flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion;
      return count;
    }
    return 0;
  }

  bool compile_deltas ()
  {
    hb_vector_t<int> rounded_deltas;
    if (unlikely (!rounded_deltas.alloc (indices.length)))
      return false;

    for (unsigned i = 0; i < indices.length; i++)
    {
      if (!indices[i]) continue;
      int rounded_delta = (int) roundf (deltas_x[i]);
      rounded_deltas.push (rounded_delta);
    }

    if (!rounded_deltas) return false;
    /* allocate enough memory: up to 3 bytes per delta */
    unsigned alloc_len = 3 * rounded_deltas.length;
    if (deltas_y)
      alloc_len *= 2;

    if (unlikely (!compiled_deltas.resize (alloc_len))) return false;

    unsigned i = 0;
    unsigned encoded_len = encode_delta_run (i, compiled_deltas.as_array (), rounded_deltas);

    if (deltas_y)
    {
      /* reuse the rounded_deltas vector; check that deltas_y has the same
       * number of deltas as deltas_x */
      unsigned j = 0;
      for (unsigned idx = 0; idx < indices.length; idx++)
      {
        if (!indices[idx]) continue;
        int rounded_delta = (int) roundf (deltas_y[idx]);

        if (j >= rounded_deltas.length) return false;

        rounded_deltas[j++] = rounded_delta;
      }

      if (j != rounded_deltas.length) return false;
      /* reset i because we reuse rounded_deltas for deltas_y */
      i = 0;
      encoded_len += encode_delta_run (i, compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
    }
    return compiled_deltas.resize (encoded_len);
  }

  unsigned encode_delta_run (unsigned& i,
                             hb_array_t<char> encoded_bytes,
                             const hb_vector_t<int>& deltas) const
  {
    unsigned num_deltas = deltas.length;
    unsigned encoded_len = 0;
    while (i < num_deltas)
    {
      int val = deltas[i];
      if (val == 0)
        encoded_len += encode_delta_run_as_zeroes (i, encoded_bytes.sub_array (encoded_len), deltas);
      else if (val >= -128 && val <= 127)
        encoded_len += encode_delta_run_as_bytes (i, encoded_bytes.sub_array (encoded_len), deltas);
      else
        encoded_len += encode_delta_run_as_words (i, encoded_bytes.sub_array (encoded_len), deltas);
    }
    return encoded_len;
  }

  unsigned encode_delta_run_as_zeroes (unsigned& i,
                                       hb_array_t<char> encoded_bytes,
                                       const hb_vector_t<int>& deltas) const
  {
    unsigned num_deltas = deltas.length;
    unsigned run_length = 0;
    auto it = encoded_bytes.iter ();
    unsigned encoded_len = 0;
    while (i < num_deltas && deltas[i] == 0)
    {
      i++;
      run_length++;
    }

    while (run_length >= 64)
    {
      *it++ = char (DELTAS_ARE_ZERO | 63);
      run_length -= 64;
      encoded_len++;
    }

    if (run_length)
    {
      *it++ = char (DELTAS_ARE_ZERO | (run_length - 1));
      encoded_len++;
    }
    return encoded_len;
  }

  unsigned encode_delta_run_as_bytes (unsigned &i,
                                      hb_array_t<char> encoded_bytes,
                                      const hb_vector_t<int>& deltas) const
  {
    unsigned start = i;
    unsigned num_deltas = deltas.length;
    while (i < num_deltas)
    {
      int val = deltas[i];
      if (val > 127 || val < -128)
        break;

      /* from fonttools: if there are two or more zeros in a sequence,
       * it is better to start a new run to save bytes. */
      if (val == 0 && i + 1 < num_deltas && deltas[i+1] == 0)
        break;

      i++;
    }
    unsigned run_length = i - start;

    unsigned encoded_len = 0;
    auto it = encoded_bytes.iter ();

    while (run_length >= 64)
    {
      *it++ = 63;
      encoded_len++;

      for (unsigned j = 0; j < 64; j++)
      {
        *it++ = static_cast<char> (deltas[start + j]);
        encoded_len++;
      }

      start += 64;
      run_length -= 64;
    }

    if (run_length)
    {
      *it++ = run_length - 1;
      encoded_len++;

      while (start < i)
      {
        *it++ = static_cast<char> (deltas[start++]);
        encoded_len++;
      }
    }

    return encoded_len;
  }

  unsigned encode_delta_run_as_words (unsigned &i,
                                      hb_array_t<char> encoded_bytes,
                                      const hb_vector_t<int>& deltas) const
  {
    unsigned start = i;
    unsigned num_deltas = deltas.length;
    while (i < num_deltas)
    {
      int val = deltas[i];

      /* start a new run for a single zero value */
      if (val == 0) break;

      /* from fonttools: continue a word-encoded run if there is only a
       * single value in the range [-128, 127], because that is more compact.
       * Only start a new run when there are two consecutive such values. */
      if (val >= -128 && val <= 127 &&
          i + 1 < num_deltas &&
          deltas[i+1] >= -128 && deltas[i+1] <= 127)
        break;

      i++;
    }

    unsigned run_length = i - start;
    auto it = encoded_bytes.iter ();
    unsigned encoded_len = 0;
    while (run_length >= 64)
    {
      *it++ = (DELTAS_ARE_WORDS | 63);
      encoded_len++;

      for (unsigned j = 0; j < 64; j++)
      {
        int16_t delta_val = deltas[start + j];
        *it++ = static_cast<char> (delta_val >> 8);
        *it++ = static_cast<char> (delta_val & 0xFF);

        encoded_len += 2;
      }

      start += 64;
      run_length -= 64;
    }

    if (run_length)
    {
      *it++ = (DELTAS_ARE_WORDS | (run_length - 1));
      encoded_len++;
      while (start < i)
      {
        int16_t delta_val = deltas[start++];
        *it++ = static_cast<char> (delta_val >> 8);
        *it++ = static_cast<char> (delta_val & 0xFF);

        encoded_len += 2;
      }
    }
    return encoded_len;
  }

  bool calc_inferred_deltas (const contour_point_vector_t& orig_points)
  {
    unsigned point_count = orig_points.length;
    if (point_count != indices.length)
      return false;

    unsigned ref_count = 0;
    hb_vector_t<unsigned> end_points;

    for (unsigned i = 0; i < point_count; i++)
    {
      if (indices.arrayZ[i])
        ref_count++;
      if (orig_points.arrayZ[i].is_end_point)
        end_points.push (i);
    }
    /* all points are referenced, nothing to do */
    if (ref_count == point_count)
      return true;
    if (unlikely (end_points.in_error ())) return false;

    hb_set_t inferred_idxes;
    unsigned start_point = 0;
    for (unsigned end_point : end_points)
    {
      /* Count the unreferenced points in this contour.  If the contour has
       * no unreferenced points, or no referenced ones, there is nothing to do. */
      unsigned unref_count = 0;
      for (unsigned i = start_point; i < end_point + 1; i++)
        unref_count += indices.arrayZ[i];
      unref_count = (end_point - start_point + 1) - unref_count;

      unsigned j = start_point;
      if (unref_count == 0 || unref_count > end_point - start_point)
        goto no_more_gaps;
      for (;;)
      {
        /* Locate the next gap of unreferenced points between two referenced points prev and next.
         * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
         */
        unsigned int prev, next, i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (indices.arrayZ[i] && !indices.arrayZ[j]) break;
        }
        prev = j = i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (!indices.arrayZ[i] && indices.arrayZ[j]) break;
        }
        next = j;
        /* Infer deltas for all unreferenced points in the gap between prev and next */
        i = prev;
        for (;;)
        {
          i = next_index (i, start_point, end_point);
          if (i == next) break;
          deltas_x.arrayZ[i] = infer_delta (orig_points.arrayZ[i].x, orig_points.arrayZ[prev].x, orig_points.arrayZ[next].x,
                                            deltas_x.arrayZ[prev], deltas_x.arrayZ[next]);
          deltas_y.arrayZ[i] = infer_delta (orig_points.arrayZ[i].y, orig_points.arrayZ[prev].y, orig_points.arrayZ[next].y,
                                            deltas_y.arrayZ[prev], deltas_y.arrayZ[next]);
          inferred_idxes.add (i);
          if (--unref_count == 0) goto no_more_gaps;
        }
      }
    no_more_gaps:
      start_point = end_point + 1;
    }

    for (unsigned i = 0; i < point_count; i++)
    {
      /* If a point is not referenced and its delta was not inferred, set the
       * delta to 0.  Afterwards all points are referenced, as gvar expects. */
      if (!indices[i])
      {
        if (!inferred_idxes.has (i))
        {
          deltas_x.arrayZ[i] = 0.f;
          deltas_y.arrayZ[i] = 0.f;
        }
        indices[i] = true;
      }
    }
    return true;
  }

  static float infer_delta (float target_val, float prev_val, float next_val, float prev_delta, float next_delta)
  {
    if (prev_val == next_val)
      return (prev_delta == next_delta) ? prev_delta : 0.f;
    else if (target_val <= hb_min (prev_val, next_val))
      return (prev_val < next_val) ? prev_delta : next_delta;
    else if (target_val >= hb_max (prev_val, next_val))
      return (prev_val > next_val) ? prev_delta : next_delta;

    float r = (target_val - prev_val) / (next_val - prev_val);
    return prev_delta + r * (next_delta - prev_delta);
  }

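  /* A worked example of the interpolation above (hypothetical coordinates):
   * an unreferenced point at x = 150 between referenced neighbours at
   * prev x = 100 (delta 10) and next x = 200 (delta 20) gets
   * r = (150 - 100) / (200 - 100) = 0.5, hence an inferred delta of
   * 10 + 0.5 * (20 - 10) = 15.  A point outside [100, 200] instead snaps
   * to the delta of the neighbour whose coordinate is nearer. */
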
  static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
  { return (i >= end) ? start : (i + 1); }
};

struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // Check min_size only; TupleVariationHeader and the variation data
    // are checked while accessing them through the iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount.get_count ();
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header ());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t;
  struct tuple_variations_t
  {
    hb_vector_t<tuple_delta_t> tuple_vars;

    private:
    /* referenced point set -> compiled point data map */
    hb_hashmap_t<const hb_vector_t<bool>*, hb_bytes_t> point_data_map;
    /* referenced point set -> count map, used in finding shared points */
    hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;

    /* Empty for non-gvar tuples.
     * shared_points_bytes is just a copy of some value in the point_data_map,
     * which will be freed during map destruction.  Save it for serialization,
     * so there is no need to do find_shared_points () again. */
    hb_bytes_t shared_points_bytes;

    /* total compiled byte size as TupleVariationData format, initialized to its
     * min_size: 4 */
    unsigned compiled_byte_size = 4;

    public:
    tuple_variations_t () = default;
    tuple_variations_t (const tuple_variations_t&) = delete;
    tuple_variations_t& operator=(const tuple_variations_t&) = delete;
    tuple_variations_t (tuple_variations_t&&) = default;
    tuple_variations_t& operator=(tuple_variations_t&&) = default;
    ~tuple_variations_t () { fini (); }
    void fini ()
    {
      for (auto _ : point_data_map.values ())
        _.fini ();

      point_set_count_map.fini ();
      tuple_vars.fini ();
    }

    explicit operator bool () const { return bool (tuple_vars); }
    unsigned get_var_count () const
    {
      unsigned count = tuple_vars.length;
      if (shared_points_bytes.length)
        count |= TupleVarCount::SharedPointNumbers;
      return count;
    }

    unsigned get_compiled_byte_size () const
    { return compiled_byte_size; }

    bool create_from_tuple_var_data (tuple_iterator_t iterator,
                                     unsigned tuple_var_count,
                                     unsigned point_count,
                                     bool is_gvar,
                                     const hb_map_t *axes_old_index_tag_map,
                                     const hb_vector_t<unsigned> &shared_indices,
                                     const hb_array_t<const F2DOT14> shared_tuples)
    {
      do
      {
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
        { fini (); return false; }

        hb_hashmap_t<hb_tag_t, Triple> axis_tuples;
        if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples)
            || axis_tuples.is_empty ())
        { fini (); return false; }

        hb_vector_t<unsigned> private_indices;
        bool has_private_points = iterator.current_tuple->has_private_points ();
        const HBUINT8 *end = p + length;
        if (has_private_points &&
            !TupleVariationData::unpack_points (p, private_indices, end))
        { fini (); return false; }

        const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices;
        bool apply_to_all = (indices.length == 0);
        unsigned num_deltas = apply_to_all ? point_count : indices.length;

        hb_vector_t<int> deltas_x;

        if (unlikely (!deltas_x.resize (num_deltas, false) ||
                      !TupleVariationData::unpack_deltas (p, deltas_x, end)))
        { fini (); return false; }

        hb_vector_t<int> deltas_y;
        if (is_gvar)
        {
          if (unlikely (!deltas_y.resize (num_deltas, false) ||
                        !TupleVariationData::unpack_deltas (p, deltas_y, end)))
          { fini (); return false; }
        }

        tuple_delta_t var;
        var.axis_tuples = std::move (axis_tuples);
        if (unlikely (!var.indices.resize (point_count) ||
                      !var.deltas_x.resize (point_count, false)))
        { fini (); return false; }

        if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false)))
        { fini (); return false; }

        for (unsigned i = 0; i < num_deltas; i++)
        {
          unsigned idx = apply_to_all ? i : indices[i];
          if (idx >= point_count) continue;
          var.indices[idx] = true;
          var.deltas_x[idx] = static_cast<float> (deltas_x[i]);
          if (is_gvar)
            var.deltas_y[idx] = static_cast<float> (deltas_y[i]);
        }
        tuple_vars.push (std::move (var));
      } while (iterator.move_to_next ());
      return true;
    }

    bool create_from_item_var_data (const VarData &var_data,
                                    const hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>>& regions,
                                    const hb_map_t& axes_old_index_tag_map,
                                    const hb_inc_bimap_t* inner_map = nullptr)
    {
      /* NULL offset: return early so the original VarIdx values stay valid */
      if (&var_data == &Null (VarData))
        return true;

      unsigned num_regions = var_data.get_region_index_count ();
      if (!tuple_vars.alloc (num_regions)) return false;

      unsigned item_count = inner_map ? inner_map->get_population () : var_data.get_item_count ();
      unsigned row_size = var_data.get_row_size ();
      const HBUINT8 *delta_bytes = var_data.get_delta_bytes ();

      for (unsigned r = 0; r < num_regions; r++)
      {
        /* In VarData, deltas are organized in rows; convert them into
         * column (region) based tuples, resizing deltas_x first. */
        tuple_delta_t tuple;
        if (!tuple.deltas_x.resize (item_count, false) ||
            !tuple.indices.resize (item_count, false))
          return false;

        for (unsigned i = 0; i < item_count; i++)
        {
          tuple.indices.arrayZ[i] = true;
          tuple.deltas_x.arrayZ[i] = var_data.get_item_delta_fast (inner_map ? inner_map->backward (i) : i,
                                                                   r, delta_bytes, row_size);
        }

        unsigned region_index = var_data.get_region_index (r);
        if (region_index >= regions.length) return false;
        tuple.axis_tuples = regions.arrayZ[region_index];

        tuple_vars.push (std::move (tuple));
      }
      return !tuple_vars.in_error ();
    }

    private:
    static int _cmp_axis_tag (const void *pa, const void *pb)
    {
      const hb_tag_t *a = (const hb_tag_t*) pa;
      const hb_tag_t *b = (const hb_tag_t*) pb;
      return (int)(*a) - (int)(*b);
    }

    bool change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      /* sort the axis tags so the result is deterministic */
      hb_vector_t<hb_tag_t> axis_tags;
      if (!axis_tags.alloc (normalized_axes_location.get_population ()))
        return false;
      for (auto t : normalized_axes_location.keys ())
        axis_tags.push (t);

      axis_tags.qsort (_cmp_axis_tag);
      for (auto axis_tag : axis_tags)
      {
        Triple *axis_limit;
        if (!normalized_axes_location.has (axis_tag, &axis_limit))
          return false;
        TripleDistances axis_triple_distances{1.f, 1.f};
        if (axes_triple_distances.has (axis_tag))
          axis_triple_distances = axes_triple_distances.get (axis_tag);

        hb_vector_t<tuple_delta_t> new_vars;
        for (const tuple_delta_t& var : tuple_vars)
        {
          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, *axis_limit, axis_triple_distances);
          if (!out) continue;

          unsigned new_len = new_vars.length + out.length;

          if (unlikely (!new_vars.alloc (new_len, false)))
          { fini (); return false; }

          for (unsigned i = 0; i < out.length; i++)
            new_vars.push (std::move (out[i]));
        }
        tuple_vars.fini ();
        tuple_vars = std::move (new_vars);
      }
      return true;
    }

    /* merge tuple variations with overlapping tents */
    void merge_tuple_variations ()
    {
      hb_vector_t<tuple_delta_t> new_vars;
      hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m;
      unsigned i = 0;
      for (const tuple_delta_t& var : tuple_vars)
      {
        /* if all axes are pinned, drop the tuple variation */
        if (var.axis_tuples.is_empty ()) continue;

        unsigned *idx;
        if (m.has (&(var.axis_tuples), &idx))
        {
          new_vars[*idx] += var;
        }
        else
        {
          new_vars.push (var);
          m.set (&(var.axis_tuples), i);
          i++;
        }
      }
      tuple_vars.fini ();
      tuple_vars = std::move (new_vars);
    }

    hb_bytes_t compile_point_set (const hb_vector_t<bool> &point_indices)
    {
      unsigned num_points = 0;
      for (bool i : point_indices)
        if (i) num_points++;

      unsigned indices_length = point_indices.length;
      /* If the point set consists of all points in the glyph, it is encoded
       * as a single zero byte */
      if (num_points == indices_length)
      {
        char *p = (char *) hb_calloc (1, sizeof (char));
        if (unlikely (!p)) return hb_bytes_t ();

        return hb_bytes_t (p, 1);
      }

      /* allocate enough memory: 2 bytes for the count + 3 bytes for each point */
      unsigned num_bytes = 2 + 3 * num_points;
      char *p = (char *) hb_calloc (num_bytes, sizeof (char));
      if (unlikely (!p)) return hb_bytes_t ();

      unsigned pos = 0;
      /* binary data starts with the total number of referenced points */
      if (num_points < 0x80)
        p[pos++] = num_points;
      else
      {
        p[pos++] = ((num_points >> 8) | 0x80);
        p[pos++] = num_points & 0xFF;
      }

      const unsigned max_run_length = 0x7F;
      unsigned i = 0;
      unsigned last_value = 0;
      unsigned num_encoded = 0;
      while (i < indices_length && num_encoded < num_points)
      {
        unsigned run_length = 0;
        unsigned header_pos = pos;
        p[pos++] = 0;

        bool use_byte_encoding = false;
        bool new_run = true;
        while (i < indices_length && num_encoded < num_points &&
               run_length <= max_run_length)
        {
          // find the next referenced point index
          while (i < indices_length && !point_indices[i])
            i++;

          if (i >= indices_length) break;

          unsigned cur_value = i;
          unsigned delta = cur_value - last_value;

          if (new_run)
          {
            use_byte_encoding = (delta <= 0xFF);
            new_run = false;
          }

          if (use_byte_encoding && delta > 0xFF)
            break;

          if (use_byte_encoding)
            p[pos++] = delta;
          else
          {
            p[pos++] = delta >> 8;
            p[pos++] = delta & 0xFF;
          }
          i++;
          last_value = cur_value;
          run_length++;
          num_encoded++;
        }

        if (use_byte_encoding)
          p[header_pos] = run_length - 1;
        else
          p[header_pos] = (run_length - 1) | 0x80;
      }
      return hb_bytes_t (p, pos);
    }
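
    /* A worked example of the point-number encoding above (hypothetical
     * point set): referencing points {2, 10, 300} of a larger glyph
     * compiles to
     *
     *   0x03             count of referenced points (< 0x80: one byte)
     *   0x01 0x02 0x08   byte run of two point-number deltas: 2, 8
     *   0x80 0x01 0x22   word run of one delta: 290 (10 + 290 = 300)
     *
     * whereas a set referencing every point in the glyph is the single
     * byte 0x00. */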
1376 
1377     /* compile all point set and store byte data in a point_set->hb_bytes_t hashmap,
1378      * also update point_set->count map, which will be used in finding shared
1379      * point set*/
compile_all_point_setsOT::TupleVariationData::tuple_variations_t1380     bool compile_all_point_sets ()
1381     {
1382       for (const auto& tuple: tuple_vars)
1383       {
1384         const hb_vector_t<bool>* points_set = &(tuple.indices);
1385         if (point_data_map.has (points_set))
1386         {
1387           unsigned *count;
1388           if (unlikely (!point_set_count_map.has (points_set, &count) ||
1389                         !point_set_count_map.set (points_set, (*count) + 1)))
1390             return false;
1391           continue;
1392         }
1393 
1394         hb_bytes_t compiled_data = compile_point_set (*points_set);
1395         if (unlikely (compiled_data == hb_bytes_t ()))
1396           return false;
1397 
1398         if (!point_data_map.set (points_set, compiled_data) ||
1399             !point_set_count_map.set (points_set, 1))
1400           return false;
1401       }
1402       return true;
1403     }
1404 
1405     /* find shared points set which saves most bytes */
find_shared_pointsOT::TupleVariationData::tuple_variations_t1406     hb_bytes_t find_shared_points ()
1407     {
1408       unsigned max_saved_bytes = 0;
1409       hb_bytes_t res{};
1410 
1411       for (const auto& _ : point_data_map.iter ())
1412       {
1413         const hb_vector_t<bool>* points_set = _.first;
1414         unsigned data_length = _.second.length;
1415         unsigned *count;
1416         if (unlikely (!point_set_count_map.has (points_set, &count) ||
1417                       *count <= 1))
1418           return hb_bytes_t ();
1419 
1420         unsigned saved_bytes = data_length * ((*count) -1);
1421         if (saved_bytes > max_saved_bytes)
1422         {
1423           max_saved_bytes = saved_bytes;
1424           res = _.second;
1425         }
1426       }
1427       return res;
1428     }
1429 
calc_inferred_deltasOT::TupleVariationData::tuple_variations_t1430     bool calc_inferred_deltas (contour_point_vector_t& contour_points)
1431     {
1432       for (tuple_delta_t& var : tuple_vars)
1433         if (!var.calc_inferred_deltas (contour_points))
1434           return false;
1435 
1436       return true;
1437     }
1438 
1439     public:
instantiateOT::TupleVariationData::tuple_variations_t1440     bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
1441                       const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances,
1442                       contour_point_vector_t* contour_points = nullptr)
1443     {
1444       if (!tuple_vars) return true;
1445       if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances))
1446         return false;
1447       /* compute inferred deltas only for gvar */
1448       if (contour_points)
1449         if (!calc_inferred_deltas (*contour_points))
1450           return false;
1451 
1452       merge_tuple_variations ();
1453       return !tuple_vars.in_error ();
1454     }

    bool compile_bytes (const hb_map_t& axes_index_map,
                        const hb_map_t& axes_old_index_tag_map,
                        bool use_shared_points,
                        const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map = nullptr)
    {
      // compile the point sets and store the data in a hashmap
      if (!compile_all_point_sets ())
        return false;

      if (use_shared_points)
      {
        shared_points_bytes = find_shared_points ();
        compiled_byte_size += shared_points_bytes.length;
      }
      // compile the deltas and the tuple variation header for each tuple variation
      for (auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_bytes_t *points_data;
        if (unlikely (!point_data_map.has (points_set, &points_data)))
          return false;

        if (!tuple.compile_deltas ())
          return false;

        unsigned points_data_length = (*points_data != shared_points_bytes) ? points_data->length : 0;
        if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map,
                                             shared_tuples_idx_map))
          return false;
        compiled_byte_size += tuple.compiled_tuple_header.length + points_data_length + tuple.compiled_deltas.length;
      }
      return true;
    }
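
    /* Resulting byte budget, as accumulated above: the optional shared point
     * numbers, plus, for every tuple variation, its header, its private point
     * numbers (0 bytes when it uses the shared set), and its packed deltas. */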

    bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const
    {
      TRACE_SERIALIZE (this);
      for (const auto& tuple: tuple_vars)
      {
        tuple.compiled_tuple_header.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
        total_header_len += tuple.compiled_tuple_header.length;
      }
      return_trace (true);
    }

    bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const
    {
      TRACE_SERIALIZE (this);
      if (is_gvar)
        shared_points_bytes.copy (c);

      for (const auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_bytes_t *point_data;
        if (!point_data_map.has (points_set, &point_data))
          return_trace (false);

        if (!is_gvar || *point_data != shared_points_bytes)
          point_data->copy (c);

        tuple.compiled_deltas.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
      }

      /* Pad gvar data to an even length, since short glyphVariationData
       * offsets are stored halved. */
      if (is_gvar && (compiled_byte_size % 2))
      {
        HBUINT8 pad;
        pad = 0;
        if (!c->embed (pad)) return_trace (false);
      }
      return_trace (true);
    }
  };

  struct tuple_iterator_t
  {
    unsigned get_axis_count () const { return axis_count; }

    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<TupleVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
      table_base = table_base_;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(table_base+var_data->data);
        const HBUINT8 *p = base;
        if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
                                                                current_tuple->get_size (axis_count)));
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(table_base+var_data->data) + data_offset; }

    private:
    const TupleVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;
    const void *table_base;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  const void *table_base,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count, table_base);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }
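
  /* Typical caller pattern (illustrative sketch, not part of the API):
   *
   *   hb_vector_t<unsigned int> shared_indices;
   *   tuple_iterator_t iterator;
   *   if (get_tuple_iterator (var_data_bytes, axis_count, table_base,
   *                           shared_indices, &iterator))
   *     do {
   *       const TupleVariationHeader *header = iterator.current_tuple;
   *       // ... process one tuple variation ...
   *     } while (iterator.move_to_next ());
   */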

  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }

  static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<unsigned int> &points /* OUT */,
                             const HBUINT8 *end)
  {
    enum packed_point_flag_t
    {
      POINTS_ARE_WORDS     = 0x80,
      POINT_RUN_COUNT_MASK = 0x7F
    };

    if (unlikely (p + 1 > end)) return false;

    unsigned count = *p++;
    if (count & POINTS_ARE_WORDS)
    {
      if (unlikely (p + 1 > end)) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    if (unlikely (!points.resize (count, false))) return false;

    unsigned n = 0;
    unsigned i = 0;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & POINTS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          n += *(const HBUINT16 *)p;
          points.arrayZ[i] = n;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          n += *p++;
          points.arrayZ[i] = n;
        }
      }
    }
    return true;
  }
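
  /* Worked example (illustrative): point numbers are stored as runs of
   * deltas that accumulate.  The stream { 0x02, 0x01, 0x01, 0x02 } decodes
   * as: count = 2; control 0x01 = a run of 2 byte-sized values; the deltas
   * 1 and 2 accumulate to the point numbers { 1, 3 }. */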

  static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<int> &deltas /* IN/OUT */,
                             const HBUINT8 *end)
  {
    unsigned i = 0;
    unsigned count = deltas.length;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & DELTAS_ARE_ZERO)
      {
        for (; i < stop; i++)
          deltas.arrayZ[i] = 0;
      }
      else if (control & DELTAS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT16 *) p;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT8 *) p++;
        }
      }
    }
    return true;
  }
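
  /* Worked example (illustrative): with deltas.length = 4, the stream
   * { 0x81, 0x01, 0x0A, 0xF6 } decodes as: control 0x81 = DELTAS_ARE_ZERO
   * with a run of 2, giving { 0, 0 }; control 0x01 = a run of 2 signed
   * bytes, 0x0A = 10 and 0xF6 = -10; result: { 0, 0, 10, -10 }. */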

  bool has_data () const { return tupleVarCount; }

  bool decompile_tuple_variations (unsigned point_count,
                                   bool is_gvar,
                                   tuple_iterator_t iterator,
                                   const hb_map_t *axes_old_index_tag_map,
                                   const hb_vector_t<unsigned> &shared_indices,
                                   const hb_array_t<const F2DOT14> shared_tuples,
                                   tuple_variations_t& tuple_variations /* OUT */) const
  {
    return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount,
                                                        point_count, is_gvar,
                                                        axes_old_index_tag_map,
                                                        shared_indices,
                                                        shared_tuples);
  }

  bool serialize (hb_serialize_context_t *c,
                  bool is_gvar,
                  const tuple_variations_t& tuple_variations) const
  {
    TRACE_SERIALIZE (this);
    /* Empty tuple variations: nothing to do, skip serialization. */
    if (!tuple_variations) return_trace (true);

    auto *out = c->start_embed (this);
    if (unlikely (!c->extend_min (out))) return_trace (false);

    if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (),
                          HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);

    unsigned total_header_len = 0;

    if (!tuple_variations.serialize_var_headers (c, total_header_len))
      return_trace (false);

    unsigned data_offset = min_size + total_header_len;
    if (!is_gvar) data_offset += 4;
    if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);

    return tuple_variations.serialize_var_data (c, is_gvar);
  }

  protected:
  struct TupleVarCount : HBUINT16
  {
    friend struct tuple_variations_t;
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
    unsigned int get_count () const { return (*this) & CountMask; }
    TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    explicit operator bool () const { return get_count (); }

    protected:
    enum Flags
    {
      SharedPointNumbers = 0x8000u,
      CountMask          = 0x0FFFu
    };
    public:
    DEFINE_SIZE_STATIC (2);
  };
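
  /* Example (illustrative): a tupleVarCount value of 0x8003 has the
   * SharedPointNumbers flag set and a count of 3, i.e. three tuple
   * variation tables preceded by one shared point-number stream. */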

  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
                                 * low 12 bits are the number of tuple variation tables
                                 * for this glyph. The number of tuple variation tables
                                 * can be any number between 1 and 4095. */
  Offset16To<HBUINT8>
                data;           /* Offset from the start of the base table
                                 * to the serialized data. */
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
  public:
  DEFINE_SIZE_MIN (4);
};

using tuple_variations_t = TupleVariationData::tuple_variations_t;
struct item_variations_t
{
  using region_t = const hb_hashmap_t<hb_tag_t, Triple>*;
  private:
  /* Each subtable is decompiled into a tuple_variations_t, in which all
   * tuples have the same number of deltas (rows). */
  hb_vector_t<tuple_variations_t> vars;

  /* Original region list, decompiled from the item varstore; used when
   * rebuilding the region list after instantiation. */
  hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>> orig_region_list;

  /* Region list: a vector of regions that preserves the original order of
   * regions that existed before instantiate () and appends new regions at
   * the end.  Regions are already stored in each tuple, so only pointers
   * are saved here.  When converting back to an item varstore, unused
   * regions are pruned. */
  hb_vector_t<region_t> region_list;

  /* region -> index map after instantiation and pruning of unused regions */
  hb_hashmap_t<region_t, unsigned> region_map;

  /* all delta rows after instantiation */
  hb_vector_t<hb_vector_t<int>> delta_rows;
  /* final optimized vector of encoding objects used to assemble the varstore */
  hb_vector_t<delta_row_encoding_t> encodings;

  /* old VarIdx -> new VarIdx map */
  hb_map_t varidx_map;

  /* whether any delta requires long (32-bit) words */
  bool has_long = false;

  public:
  bool has_long_word () const
  { return has_long; }

  const hb_vector_t<region_t>& get_region_list () const
  { return region_list; }

  const hb_vector_t<delta_row_encoding_t>& get_vardata_encodings () const
  { return encodings; }

  const hb_map_t& get_varidx_map () const
  { return varidx_map; }

  bool instantiate (const VariationStore& varStore,
                    const hb_subset_plan_t *plan,
                    bool optimize=true,
                    bool use_no_variation_idx=true,
                    const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
  {
    if (!create_from_item_varstore (varStore, plan->axes_old_index_tag_map, inner_maps))
      return false;
    if (!instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances))
      return false;
    return as_item_varstore (optimize, use_no_variation_idx);
  }
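
  /* Typical usage (illustrative sketch, assuming a prepared subset plan):
   *
   *   item_variations_t item_vars;
   *   if (item_vars.instantiate (varStore, plan))
   *   {
   *     const hb_map_t& varidx_map = item_vars.get_varidx_map ();
   *     // ... remap old VariationIndex values through varidx_map ...
   *   }
   */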

  /* Keep the APIs below public only for unit testing: test-item-varstore. */
  bool create_from_item_varstore (const VariationStore& varStore,
                                  const hb_map_t& axes_old_index_tag_map,
                                  const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
  {
    const VarRegionList& regionList = varStore.get_region_list ();
    if (!regionList.get_var_regions (axes_old_index_tag_map, orig_region_list))
      return false;

    unsigned num_var_data = varStore.get_sub_table_count ();
    if (inner_maps && inner_maps.length != num_var_data) return false;
    if (!vars.alloc (num_var_data)) return false;

    for (unsigned i = 0; i < num_var_data; i++)
    {
      if (inner_maps && !inner_maps.arrayZ[i].get_population ())
          continue;
      tuple_variations_t var_data_tuples;
      if (!var_data_tuples.create_from_item_var_data (varStore.get_sub_table (i),
                                                      orig_region_list,
                                                      axes_old_index_tag_map,
                                                      inner_maps ? &(inner_maps.arrayZ[i]) : nullptr))
        return false;

      vars.push (std::move (var_data_tuples));
    }
    return !vars.in_error ();
  }

  bool instantiate_tuple_vars (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                               const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
  {
    for (tuple_variations_t& tuple_vars : vars)
      if (!tuple_vars.instantiate (normalized_axes_location, axes_triple_distances))
        return false;

    if (!build_region_list ()) return false;
    return true;
  }

  bool build_region_list ()
  {
    /* Scan all tuples, collect all unique regions, and prune unused ones. */
    hb_hashmap_t<region_t, unsigned> all_regions;
    hb_hashmap_t<region_t, unsigned> used_regions;

    /* Track newly seen regions in a vector to keep the result deterministic. */
    hb_vector_t<region_t> all_unique_regions;
    for (const tuple_variations_t& sub_table : vars)
    {
      for (const tuple_delta_t& tuple : sub_table.tuple_vars)
      {
        region_t r = &(tuple.axis_tuples);
        if (!used_regions.has (r))
        {
          bool all_zeros = true;
          for (float d : tuple.deltas_x)
          {
            int delta = (int) roundf (d);
            if (delta != 0)
            {
              all_zeros = false;
              break;
            }
          }
          if (!all_zeros)
          {
            if (!used_regions.set (r, 1))
              return false;
          }
        }
        if (all_regions.has (r))
          continue;
        if (!all_regions.set (r, 1))
          return false;
        all_unique_regions.push (r);
      }
    }

    if (!all_regions || !all_unique_regions) return false;
    if (!region_list.alloc (all_regions.get_population ()))
      return false;

    unsigned idx = 0;
    /* Append the regions that existed before instantiation, preserving
     * their original order. */
    for (const auto& r : orig_region_list)
    {
      if (!all_regions.has (&r) || !used_regions.has (&r))
        continue;

      region_list.push (&r);
      if (!region_map.set (&r, idx))
        return false;
      all_regions.del (&r);
      idx++;
    }

    /* Append the new regions at the end. */
    for (const auto& r: all_unique_regions)
    {
      if (!all_regions.has (r) || !used_regions.has (r))
        continue;
      region_list.push (r);
      if (!region_map.set (r, idx))
        return false;
      all_regions.del (r);
      idx++;
    }
    return (!region_list.in_error ()) && (!region_map.in_error ());
  }

  /* Main algorithm ported from the fonttools VarStore_optimize() method;
   * the varstore is optimized by default. */

  struct combined_gain_idx_tuple_t
  {
    int gain;
    unsigned idx_1;
    unsigned idx_2;

    combined_gain_idx_tuple_t () = default;
    combined_gain_idx_tuple_t (int gain_, unsigned i, unsigned j)
        : gain (gain_), idx_1 (i), idx_2 (j) {}

    bool operator < (const combined_gain_idx_tuple_t& o) const
    {
      if (gain != o.gain)
        return gain < o.gain;

      if (idx_1 != o.idx_1)
        return idx_1 < o.idx_1;

      return idx_2 < o.idx_2;
    }

    bool operator <= (const combined_gain_idx_tuple_t& o) const
    {
      if (*this < o) return true;
      return gain == o.gain && idx_1 == o.idx_1 && idx_2 == o.idx_2;
    }
  };
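
  /* The tuple is ordered by gain first, then by the two indices, so that
   * equal gains pop from the priority queue in a deterministic order.
   * Gains are negated on insertion below, because the queue pops its
   * minimum first and the largest gain should be handled first. */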

  bool as_item_varstore (bool optimize=true, bool use_no_variation_idx=true)
  {
    if (!region_list) return false;
    unsigned num_cols = region_list.length;
    /* pre-allocate a 2D vector for all subtables' VarData rows */
    unsigned total_rows = 0;
    for (unsigned major = 0; major < vars.length; major++)
    {
      const tuple_variations_t& tuples = vars[major];
      /* all tuples in each subtable should have the same number of deltas (rows) */
      total_rows += tuples.tuple_vars[0].deltas_x.length;
    }

    if (!delta_rows.resize (total_rows)) return false;
    /* init all rows to [0]*num_cols */
    for (unsigned i = 0; i < total_rows; i++)
      if (!(delta_rows[i].resize (num_cols))) return false;

    /* old VarIdxes -> full encoding_row mapping */
    hb_hashmap_t<unsigned, const hb_vector_t<int>*> front_mapping;
    unsigned start_row = 0;
    hb_vector_t<delta_row_encoding_t> encoding_objs;
    hb_hashmap_t<hb_vector_t<uint8_t>, unsigned> chars_idx_map;

    /* delta_rows map, used for filtering out duplicate rows */
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> delta_rows_map;
    for (unsigned major = 0; major < vars.length; major++)
    {
      /* deltas are stored per tuple (column-based); convert them back into
       * per-item (row-based) deltas */
      const tuple_variations_t& tuples = vars[major];
      unsigned num_rows = tuples.tuple_vars[0].deltas_x.length;
      for (const tuple_delta_t& tuple: tuples.tuple_vars)
      {
        if (tuple.deltas_x.length != num_rows)
          return false;

        /* skip unused regions */
        unsigned *col_idx;
        if (!region_map.has (&(tuple.axis_tuples), &col_idx))
          continue;

        for (unsigned i = 0; i < num_rows; i++)
        {
          int rounded_delta = roundf (tuple.deltas_x[i]);
          delta_rows[start_row + i][*col_idx] += rounded_delta;
          if ((!has_long) && (rounded_delta < -65536 || rounded_delta > 65535))
            has_long = true;
        }
      }

      if (!optimize)
      {
        /* Assemble a delta_row_encoding_t for this subtable; optimization is
         * skipped, so chars is left uninitialized: only the delta rows are
         * needed for serialization. */
        delta_row_encoding_t obj;
        for (unsigned r = start_row; r < start_row + num_rows; r++)
          obj.add_row (&(delta_rows.arrayZ[r]));

        encodings.push (std::move (obj));
        start_row += num_rows;
        continue;
      }

      for (unsigned minor = 0; minor < num_rows; minor++)
      {
        const hb_vector_t<int>& row = delta_rows[start_row + minor];
        if (use_no_variation_idx)
        {
          bool all_zeros = true;
          for (int delta : row)
          {
            if (delta != 0)
            {
              all_zeros = false;
              break;
            }
          }
          if (all_zeros)
            continue;
        }

        if (!front_mapping.set ((major<<16) + minor, &row))
          return false;

        hb_vector_t<uint8_t> chars = delta_row_encoding_t::get_row_chars (row);
        if (!chars) return false;

        if (delta_rows_map.has (&row))
          continue;

        delta_rows_map.set (&row, 1);
        unsigned *obj_idx;
        if (chars_idx_map.has (chars, &obj_idx))
        {
          delta_row_encoding_t& obj = encoding_objs[*obj_idx];
          if (!obj.add_row (&row))
            return false;
        }
        else
        {
          if (!chars_idx_map.set (chars, encoding_objs.length))
            return false;
          delta_row_encoding_t obj (std::move (chars), &row);
          encoding_objs.push (std::move (obj));
        }
      }

      start_row += num_rows;
    }

    /* Return directly if not optimizing: the original VariationIndex values
     * are kept, so varidx_map stays empty. */
    if (!optimize) return !encodings.in_error ();

    /* sort encoding_objs */
    encoding_objs.qsort ();

    /* Main algorithm: repeatedly pick the two encodings whose merge yields
     * the largest gain and combine them. */
    hb_priority_queue_t<combined_gain_idx_tuple_t> queue;
    unsigned num_todos = encoding_objs.length;
    for (unsigned i = 0; i < num_todos; i++)
    {
      for (unsigned j = i + 1; j < num_todos; j++)
      {
        int combining_gain = encoding_objs.arrayZ[i].gain_from_merging (encoding_objs.arrayZ[j]);
        if (combining_gain > 0)
          queue.insert (combined_gain_idx_tuple_t (-combining_gain, i, j), 0);
      }
    }

    hb_set_t removed_todo_idxes;
    while (queue)
    {
      auto t = queue.pop_minimum ().first;
      unsigned i = t.idx_1;
      unsigned j = t.idx_2;

      if (removed_todo_idxes.has (i) || removed_todo_idxes.has (j))
        continue;

      delta_row_encoding_t& encoding = encoding_objs.arrayZ[i];
      delta_row_encoding_t& other_encoding = encoding_objs.arrayZ[j];

      removed_todo_idxes.add (i);
      removed_todo_idxes.add (j);

      hb_vector_t<uint8_t> combined_chars;
      if (!combined_chars.alloc (encoding.chars.length))
        return false;

      for (unsigned idx = 0; idx < encoding.chars.length; idx++)
      {
        uint8_t v = hb_max (encoding.chars.arrayZ[idx], other_encoding.chars.arrayZ[idx]);
        combined_chars.push (v);
      }

      delta_row_encoding_t combined_encoding_obj (std::move (combined_chars));
      for (const auto& row : hb_concat (encoding.items, other_encoding.items))
        combined_encoding_obj.add_row (row);

      for (unsigned idx = 0; idx < encoding_objs.length; idx++)
      {
        if (removed_todo_idxes.has (idx)) continue;

        const delta_row_encoding_t& obj = encoding_objs.arrayZ[idx];
        /* Absorb any remaining encoding whose chars equal the merged chars.
         * Compare against the merged object's chars: the local combined_chars
         * has been moved-from above and is empty. */
        if (obj.chars == combined_encoding_obj.chars)
        {
          for (const auto& row : obj.items)
            combined_encoding_obj.add_row (row);

          removed_todo_idxes.add (idx);
          continue;
        }

        int combined_gain = combined_encoding_obj.gain_from_merging (obj);
        if (combined_gain > 0)
          queue.insert (combined_gain_idx_tuple_t (-combined_gain, idx, encoding_objs.length), 0);
      }

      encoding_objs.push (std::move (combined_encoding_obj));
    }

    int num_final_encodings = (int) encoding_objs.length - (int) removed_todo_idxes.get_population ();
    if (num_final_encodings <= 0) return false;

    if (!encodings.alloc (num_final_encodings)) return false;
    for (unsigned i = 0; i < encoding_objs.length; i++)
    {
      if (removed_todo_idxes.has (i)) continue;
      encodings.push (std::move (encoding_objs.arrayZ[i]));
    }

    /* sort again by width to make the result deterministic */
    encodings.qsort (delta_row_encoding_t::cmp_width);

    return compile_varidx_map (front_mapping);
  }

  private:
  /* Compile the old-to-new VarIdx mapping across all final encodings. */
  bool compile_varidx_map (const hb_hashmap_t<unsigned, const hb_vector_t<int>*>& front_mapping)
  {
    /* full encoding_row -> new VarIdx mapping */
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> back_mapping;

    for (unsigned major = 0; major < encodings.length; major++)
    {
      delta_row_encoding_t& encoding = encodings[major];
      /* just a sanity check; this shouldn't happen */
      if (encoding.is_empty ())
        return false;

      unsigned num_rows = encoding.items.length;

      /* sort rows to make the result deterministic */
      encoding.items.qsort (_cmp_row);

      /* compile the old-to-new VarIdx mapping */
      for (unsigned minor = 0; minor < num_rows; minor++)
      {
        unsigned new_varidx = (major << 16) + minor;
        back_mapping.set (encoding.items.arrayZ[minor], new_varidx);
      }
    }

    for (auto _ : front_mapping.iter ())
    {
      unsigned old_varidx = _.first;
      unsigned *new_varidx;
      if (back_mapping.has (_.second, &new_varidx))
        varidx_map.set (old_varidx, *new_varidx);
      else
        varidx_map.set (old_varidx, HB_OT_LAYOUT_NO_VARIATIONS_INDEX);
    }
    return !varidx_map.in_error ();
  }
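
  /* Example (illustrative): a row serialized as minor 2 of encoding 3 gets
   * the new VarIdx (3 << 16) + 2 = 0x00030002; rows that were dropped (e.g.
   * all-zero rows) map to HB_OT_LAYOUT_NO_VARIATIONS_INDEX. */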

  static int _cmp_row (const void *pa, const void *pb)
  {
    /* Compare two rows, each represented as a pointer to its vector
     * (const hb_vector_t<int>*). */
    const hb_vector_t<int>** a = (const hb_vector_t<int>**) pa;
    const hb_vector_t<int>** b = (const hb_vector_t<int>**) pb;

    for (unsigned i = 0; i < (*b)->length; i++)
    {
      int va = (*a)->arrayZ[i];
      int vb = (*b)->arrayZ[i];
      if (va != vb)
        return va < vb ? -1 : 1;
    }
    return 0;
  }
};

} /* namespace OT */


#endif /* HB_OT_VAR_COMMON_HH */