• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

part of dart.ui;

// Some methods in this file assert that their arguments are not null. These
// asserts are just to improve the error messages; they should only cover
// arguments that are either dereferenced _in Dart_, before being passed to the
// engine, or that the engine explicitly null-checks itself (after attempting to
// convert the argument to a native type). It should not be possible for a null
// or invalid value to be used by the engine even in release mode, since that
// would cause a crash. It is, however, acceptable for error messages to be much
// less useful or correct in release mode than in debug mode.
//
// Painting APIs will also warn about arguments representing NaN coordinates,
// which can not be rendered by Skia.

// Update this list when changing the list of supported codecs.
/// {@template flutter.dart:ui.imageFormats}
/// JPEG, PNG, GIF, Animated GIF, WebP, Animated WebP, BMP, and WBMP
/// {@endtemplate}
// Checked-mode validation that [rect] is non-null and contains no NaN
// coordinates. Always returns true so callers can wrap it in an assert().
bool _rectIsValid(Rect rect) {
  assert(rect != null, 'Rect argument was null.');
  assert(!rect.hasNaN, 'Rect argument contained a NaN value.');
  return true;
}

// Checked-mode validation that [rrect] is non-null and contains no NaN
// values. Always returns true so callers can wrap it in an assert().
bool _rrectIsValid(RRect rrect) {
  assert(rrect != null, 'RRect argument was null.');
  assert(!rrect.hasNaN, 'RRect argument contained a NaN value.');
  return true;
}

// Checked-mode validation that [offset] is non-null and that neither of its
// components is NaN. Always returns true so callers can wrap it in an
// assert().
bool _offsetIsValid(Offset offset) {
  assert(offset != null, 'Offset argument was null.');
  assert(!(offset.dx.isNaN || offset.dy.isNaN), 'Offset argument contained a NaN value.');
  return true;
}

// Checked-mode validation that [matrix4] is a non-null, 16-entry,
// all-finite column-major matrix. Always returns true so callers can wrap
// it in an assert().
bool _matrix4IsValid(Float64List matrix4) {
  assert(matrix4 != null, 'Matrix4 argument was null.');
  assert(matrix4.length == 16, 'Matrix4 must have 16 entries.');
  assert(!matrix4.any((double value) => !value.isFinite), 'Matrix4 entries must be finite.');
  return true;
}

// Checked-mode validation that [radius] is non-null and that neither of its
// axes is NaN. Always returns true so callers can wrap it in an assert().
bool _radiusIsValid(Radius radius) {
  assert(radius != null, 'Radius argument was null.');
  assert(!(radius.x.isNaN || radius.y.isNaN), 'Radius argument contained a NaN value.');
  return true;
}

/// Returns a copy of [a] with its alpha channel multiplied by [factor].
///
/// The scaled alpha is rounded to the nearest integer and clamped to the
/// valid 0-255 range before being applied.
Color _scaleAlpha(Color a, double factor) {
  // num.clamp returns num; toInt() restores the int that withAlpha expects,
  // avoiding an implicit downcast.
  return a.withAlpha((a.alpha * factor).round().clamp(0, 255).toInt());
}

/// An immutable 32 bit color value in ARGB format.
///
/// Consider the light teal of the Flutter logo. It is fully opaque, with a red
/// channel value of 0x42 (66), a green channel value of 0xA5 (165), and a blue
/// channel value of 0xF5 (245). In the common "hash syntax" for color values,
/// it would be described as `#42A5F5`.
///
/// Here are some ways it could be constructed:
///
/// ```dart
/// Color c = const Color(0xFF42A5F5);
/// Color c = const Color.fromARGB(0xFF, 0x42, 0xA5, 0xF5);
/// Color c = const Color.fromARGB(255, 66, 165, 245);
/// Color c = const Color.fromRGBO(66, 165, 245, 1.0);
/// ```
///
/// If you are having a problem with `Color` wherein it seems your color is just
/// not painting, check to make sure you are specifying the full 8 hexadecimal
/// digits. If you only specify six, then the leading two digits are assumed to
/// be zero, which means fully-transparent:
///
/// ```dart
/// Color c1 = const Color(0xFFFFFF); // fully transparent white (invisible)
/// Color c2 = const Color(0xFFFFFFFF); // fully opaque white (visible)
/// ```
///
/// See also:
///
///  * [Colors](https://docs.flutter.io/flutter/material/Colors-class.html), which
///    defines the colors found in the Material Design specification.
class Color {
  /// Construct a color from the lower 32 bits of an [int].
  ///
  /// The bits are interpreted as follows:
  ///
  /// * Bits 24-31 are the alpha value.
  /// * Bits 16-23 are the red value.
  /// * Bits 8-15 are the green value.
  /// * Bits 0-7 are the blue value.
  ///
  /// In other words, if AA is the alpha value in hex, RR the red value in hex,
  /// GG the green value in hex, and BB the blue value in hex, a color can be
  /// expressed as `const Color(0xAARRGGBB)`.
  ///
  /// For example, to get a fully opaque orange, you would use `const
  /// Color(0xFFFF9000)` (`FF` for the alpha, `FF` for the red, `90` for the
  /// green, and `00` for the blue).
  @pragma('vm:entry-point')
  const Color(int value) : value = value & 0xFFFFFFFF;

  /// Construct a color from the lower 8 bits of four integers.
  ///
  /// * `a` is the alpha value, with 0 being transparent and 255 being fully
  ///   opaque.
  /// * `r` is [red], from 0 to 255.
  /// * `g` is [green], from 0 to 255.
  /// * `b` is [blue], from 0 to 255.
  ///
  /// Out of range values are brought into range using modulo 255.
  ///
  /// See also [fromRGBO], which takes the alpha value as a floating point
  /// value.
  const Color.fromARGB(int a, int r, int g, int b) :
    value = (((a & 0xff) << 24) |
             ((r & 0xff) << 16) |
             ((g & 0xff) << 8)  |
             ((b & 0xff) << 0)) & 0xFFFFFFFF;

  /// Create a color from red, green, blue, and opacity, similar to `rgba()` in CSS.
  ///
  /// * `r` is [red], from 0 to 255.
  /// * `g` is [green], from 0 to 255.
  /// * `b` is [blue], from 0 to 255.
  /// * `opacity` is alpha channel of this color as a double, with 0.0 being
  ///   transparent and 1.0 being fully opaque.
  ///
  /// Out of range values are brought into range using modulo 255.
  ///
  /// See also [fromARGB], which takes the opacity as an integer value.
  const Color.fromRGBO(int r, int g, int b, double opacity) :
    value = ((((opacity * 0xff ~/ 1) & 0xff) << 24) |
              ((r                    & 0xff) << 16) |
              ((g                    & 0xff) << 8)  |
              ((b                    & 0xff) << 0)) & 0xFFFFFFFF;

  /// A 32 bit value representing this color.
  ///
  /// The bits are assigned as follows:
  ///
  /// * Bits 24-31 are the alpha value.
  /// * Bits 16-23 are the red value.
  /// * Bits 8-15 are the green value.
  /// * Bits 0-7 are the blue value.
  final int value;

  /// The alpha channel of this color in an 8 bit value.
  ///
  /// A value of 0 means this color is fully transparent. A value of 255 means
  /// this color is fully opaque.
  int get alpha => (0xff000000 & value) >> 24;

  /// The alpha channel of this color as a double.
  ///
  /// A value of 0.0 means this color is fully transparent. A value of 1.0 means
  /// this color is fully opaque.
  double get opacity => alpha / 0xFF;

  /// The red channel of this color in an 8 bit value.
  int get red => (0x00ff0000 & value) >> 16;

  /// The green channel of this color in an 8 bit value.
  int get green => (0x0000ff00 & value) >> 8;

  /// The blue channel of this color in an 8 bit value.
  int get blue => (0x000000ff & value) >> 0;

  /// Returns a new color that matches this color with the alpha channel
  /// replaced with `a` (which ranges from 0 to 255).
  ///
  /// Out of range values will have unexpected effects.
  Color withAlpha(int a) {
    return Color.fromARGB(a, red, green, blue);
  }

  /// Returns a new color that matches this color with the alpha channel
  /// replaced with the given `opacity` (which ranges from 0.0 to 1.0).
  ///
  /// Out of range values will have unexpected effects.
  Color withOpacity(double opacity) {
    assert(opacity >= 0.0 && opacity <= 1.0);
    return withAlpha((255.0 * opacity).round());
  }

  /// Returns a new color that matches this color with the red channel replaced
  /// with `r` (which ranges from 0 to 255).
  ///
  /// Out of range values will have unexpected effects.
  Color withRed(int r) {
    return Color.fromARGB(alpha, r, green, blue);
  }

  /// Returns a new color that matches this color with the green channel
  /// replaced with `g` (which ranges from 0 to 255).
  ///
  /// Out of range values will have unexpected effects.
  Color withGreen(int g) {
    return Color.fromARGB(alpha, red, g, blue);
  }

  /// Returns a new color that matches this color with the blue channel replaced
  /// with `b` (which ranges from 0 to 255).
  ///
  /// Out of range values will have unexpected effects.
  Color withBlue(int b) {
    return Color.fromARGB(alpha, red, green, b);
  }

  // See <https://www.w3.org/TR/WCAG20/#relativeluminancedef>
  static double _linearizeColorComponent(double component) {
    if (component <= 0.03928)
      return component / 12.92;
    // math.pow is statically typed num; the cast is safe because the base is
    // a double.
    return math.pow((component + 0.055) / 1.055, 2.4) as double;
  }

  /// Returns a brightness value between 0 for darkest and 1 for lightest.
  ///
  /// Represents the relative luminance of the color. This value is computationally
  /// expensive to calculate.
  ///
  /// See <https://en.wikipedia.org/wiki/Relative_luminance>.
  double computeLuminance() {
    // See <https://www.w3.org/TR/WCAG20/#relativeluminancedef>
    final double R = _linearizeColorComponent(red / 0xFF);
    final double G = _linearizeColorComponent(green / 0xFF);
    final double B = _linearizeColorComponent(blue / 0xFF);
    return 0.2126 * R + 0.7152 * G + 0.0722 * B;
  }

  /// Linearly interpolate between two colors.
  ///
  /// This is intended to be fast but as a result may be ugly. Consider
  /// [HSVColor] or writing custom logic for interpolating colors.
  ///
  /// If either color is null, this function linearly interpolates from a
  /// transparent instance of the other color. This is usually preferable to
  /// interpolating from [material.Colors.transparent] (`const
  /// Color(0x00000000)`), which is specifically transparent _black_.
  ///
  /// The `t` argument represents position on the timeline, with 0.0 meaning
  /// that the interpolation has not started, returning `a` (or something
  /// equivalent to `a`), 1.0 meaning that the interpolation has finished,
  /// returning `b` (or something equivalent to `b`), and values in between
  /// meaning that the interpolation is at the relevant point on the timeline
  /// between `a` and `b`. The interpolation can be extrapolated beyond 0.0 and
  /// 1.0, so negative values and values greater than 1.0 are valid (and can
  /// easily be generated by curves such as [Curves.elasticInOut]). Each channel
  /// will be clamped to the range 0 to 255.
  ///
  /// Values for `t` are usually obtained from an [Animation<double>], such as
  /// an [AnimationController].
  static Color lerp(Color a, Color b, double t) {
    assert(t != null);
    if (a == null && b == null)
      return null;
    if (a == null)
      return _scaleAlpha(b, t);
    if (b == null)
      return _scaleAlpha(a, 1.0 - t);
    // Clamp before toInt() so each channel is an int (clamp returns num) in
    // the 0-255 range expected by fromARGB.
    return Color.fromARGB(
      lerpDouble(a.alpha, b.alpha, t).clamp(0, 255).toInt(),
      lerpDouble(a.red, b.red, t).clamp(0, 255).toInt(),
      lerpDouble(a.green, b.green, t).clamp(0, 255).toInt(),
      lerpDouble(a.blue, b.blue, t).clamp(0, 255).toInt(),
    );
  }

  /// Combine the foreground color as a transparent color over top
  /// of a background color, and return the resulting combined color.
  ///
  /// This uses standard alpha blending ("SRC over DST") rules to produce a
  /// blended color from two colors. This can be used as a performance
  /// enhancement when trying to avoid needless alpha blending compositing
  /// operations for two things that are solid colors with the same shape, but
  /// overlay each other: instead, just paint one with the combined color.
  static Color alphaBlend(Color foreground, Color background) {
    final int alpha = foreground.alpha;
    if (alpha == 0x00) { // Foreground completely transparent.
      return background;
    }
    final int invAlpha = 0xff - alpha;
    int backAlpha = background.alpha;
    if (backAlpha == 0xff) { // Opaque background case
      return Color.fromARGB(
        0xff,
        (alpha * foreground.red + invAlpha * background.red) ~/ 0xff,
        (alpha * foreground.green + invAlpha * background.green) ~/ 0xff,
        (alpha * foreground.blue + invAlpha * background.blue) ~/ 0xff,
      );
    } else { // General case
      backAlpha = (backAlpha * invAlpha) ~/ 0xff;
      final int outAlpha = alpha + backAlpha;
      // outAlpha can only be zero if both inputs were fully transparent, and
      // the fully-transparent foreground was already handled above.
      assert(outAlpha != 0x00);
      return Color.fromARGB(
        outAlpha,
        (foreground.red * alpha + background.red * backAlpha) ~/ outAlpha,
        (foreground.green * alpha + background.green * backAlpha) ~/ outAlpha,
        (foreground.blue * alpha + background.blue * backAlpha) ~/ outAlpha,
      );
    }
  }

  @override
  bool operator ==(Object other) {
    if (identical(this, other))
      return true;
    if (other.runtimeType != runtimeType)
      return false;
    // The runtimeType check above guarantees the is-test succeeds; it exists
    // to promote `other` without a dynamic member access.
    return other is Color && value == other.value;
  }

  @override
  int get hashCode => value.hashCode;

  @override
  String toString() => 'Color(0x${value.toRadixString(16).padLeft(8, '0')})';
}
326
327/// Algorithms to use when painting on the canvas.
328///
329/// When drawing a shape or image onto a canvas, different algorithms can be
330/// used to blend the pixels. The different values of [BlendMode] specify
331/// different such algorithms.
332///
333/// Each algorithm has two inputs, the _source_, which is the image being drawn,
334/// and the _destination_, which is the image into which the source image is
335/// being composited. The destination is often thought of as the _background_.
336/// The source and destination both have four color channels, the red, green,
337/// blue, and alpha channels. These are typically represented as numbers in the
338/// range 0.0 to 1.0. The output of the algorithm also has these same four
339/// channels, with values computed from the source and destination.
340///
341/// The documentation of each value below describes how the algorithm works. In
342/// each case, an image shows the output of blending a source image with a
343/// destination image. In the images below, the destination is represented by an
344/// image with horizontal lines and an opaque landscape photograph, and the
345/// source is represented by an image with vertical lines (the same lines but
346/// rotated) and a bird clip-art image. The [src] mode shows only the source
347/// image, and the [dst] mode shows only the destination image. In the
348/// documentation below, the transparency is illustrated by a checkerboard
349/// pattern. The [clear] mode drops both the source and destination, resulting
350/// in an output that is entirely transparent (illustrated by a solid
351/// checkerboard pattern).
352///
353/// The horizontal and vertical bars in these images show the red, green, and
354/// blue channels with varying opacity levels, then all three color channels
355/// together with those same varying opacity levels, then all three color
356/// channels set to zero with those varying opacity levels, then two bars showing
357/// a red/green/blue repeating gradient, the first with full opacity and the
358/// second with partial opacity, and finally a bar with the three color channels
359/// set to zero but the opacity varying in a repeating gradient.
360///
361/// ## Application to the [Canvas] API
362///
363/// When using [Canvas.saveLayer] and [Canvas.restore], the blend mode of the
364/// [Paint] given to the [Canvas.saveLayer] will be applied when
365/// [Canvas.restore] is called. Each call to [Canvas.saveLayer] introduces a new
366/// layer onto which shapes and images are painted; when [Canvas.restore] is
367/// called, that layer is then composited onto the parent layer, with the source
368/// being the most-recently-drawn shapes and images, and the destination being
369/// the parent layer. (For the first [Canvas.saveLayer] call, the parent layer
370/// is the canvas itself.)
371///
372/// See also:
373///
374///  * [Paint.blendMode], which uses [BlendMode] to define the compositing
375///    strategy.
376enum BlendMode {
377  // This list comes from Skia's SkXfermode.h and the values (order) should be
378  // kept in sync.
379  // See: https://skia.org/user/api/skpaint#SkXfermode
380
381  /// Drop both the source and destination images, leaving nothing.
382  ///
383  /// This corresponds to the "clear" Porter-Duff operator.
384  ///
385  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_clear.png)
386  clear,
387
388  /// Drop the destination image, only paint the source image.
389  ///
390  /// Conceptually, the destination is first cleared, then the source image is
391  /// painted.
392  ///
393  /// This corresponds to the "Copy" Porter-Duff operator.
394  ///
395  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_src.png)
396  src,
397
398  /// Drop the source image, only paint the destination image.
399  ///
400  /// Conceptually, the source image is discarded, leaving the destination
401  /// untouched.
402  ///
403  /// This corresponds to the "Destination" Porter-Duff operator.
404  ///
405  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_dst.png)
406  dst,
407
408  /// Composite the source image over the destination image.
409  ///
410  /// This is the default value. It represents the most intuitive case, where
411  /// shapes are painted on top of what is below, with transparent areas showing
412  /// the destination layer.
413  ///
414  /// This corresponds to the "Source over Destination" Porter-Duff operator,
415  /// also known as the Painter's Algorithm.
416  ///
417  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_srcOver.png)
418  srcOver,
419
420  /// Composite the source image under the destination image.
421  ///
422  /// This is the opposite of [srcOver].
423  ///
424  /// This corresponds to the "Destination over Source" Porter-Duff operator.
425  ///
426  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_dstOver.png)
427  ///
428  /// This is useful when the source image should have been painted before the
429  /// destination image, but could not be.
430  dstOver,
431
432  /// Show the source image, but only where the two images overlap. The
433  /// destination image is not rendered, it is treated merely as a mask. The
434  /// color channels of the destination are ignored, only the opacity has an
435  /// effect.
436  ///
437  /// To show the destination image instead, consider [dstIn].
438  ///
439  /// To reverse the semantic of the mask (only showing the source where the
440  /// destination is absent, rather than where it is present), consider
441  /// [srcOut].
442  ///
443  /// This corresponds to the "Source in Destination" Porter-Duff operator.
444  ///
445  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_srcIn.png)
446  srcIn,
447
448  /// Show the destination image, but only where the two images overlap. The
449  /// source image is not rendered, it is treated merely as a mask. The color
450  /// channels of the source are ignored, only the opacity has an effect.
451  ///
452  /// To show the source image instead, consider [srcIn].
453  ///
454  /// To reverse the semantic of the mask (only showing the source where the
455  /// destination is present, rather than where it is absent), consider [dstOut].
456  ///
457  /// This corresponds to the "Destination in Source" Porter-Duff operator.
458  ///
459  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_dstIn.png)
460  dstIn,
461
462  /// Show the source image, but only where the two images do not overlap. The
463  /// destination image is not rendered, it is treated merely as a mask. The color
464  /// channels of the destination are ignored, only the opacity has an effect.
465  ///
466  /// To show the destination image instead, consider [dstOut].
467  ///
468  /// To reverse the semantic of the mask (only showing the source where the
469  /// destination is present, rather than where it is absent), consider [srcIn].
470  ///
471  /// This corresponds to the "Source out Destination" Porter-Duff operator.
472  ///
473  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_srcOut.png)
474  srcOut,
475
476  /// Show the destination image, but only where the two images do not overlap. The
477  /// source image is not rendered, it is treated merely as a mask. The color
478  /// channels of the source are ignored, only the opacity has an effect.
479  ///
480  /// To show the source image instead, consider [srcOut].
481  ///
482  /// To reverse the semantic of the mask (only showing the destination where the
483  /// source is present, rather than where it is absent), consider [dstIn].
484  ///
485  /// This corresponds to the "Destination out Source" Porter-Duff operator.
486  ///
487  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_dstOut.png)
488  dstOut,
489
490  /// Composite the source image over the destination image, but only where it
491  /// overlaps the destination.
492  ///
493  /// This corresponds to the "Source atop Destination" Porter-Duff operator.
494  ///
495  /// This is essentially the [srcOver] operator, but with the output's opacity
496  /// channel being set to that of the destination image instead of being a
497  /// combination of both image's opacity channels.
498  ///
499  /// For a variant with the destination on top instead of the source, see
500  /// [dstATop].
501  ///
502  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_srcATop.png)
503  srcATop,
504
505  /// Composite the destination image over the source image, but only where it
506  /// overlaps the source.
507  ///
508  /// This corresponds to the "Destination atop Source" Porter-Duff operator.
509  ///
510  /// This is essentially the [dstOver] operator, but with the output's opacity
511  /// channel being set to that of the source image instead of being a
512  /// combination of both image's opacity channels.
513  ///
514  /// For a variant with the source on top instead of the destination, see
515  /// [srcATop].
516  ///
517  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_dstATop.png)
518  dstATop,
519
520  /// Apply a bitwise `xor` operator to the source and destination images. This
521  /// leaves transparency where they would overlap.
522  ///
523  /// This corresponds to the "Source xor Destination" Porter-Duff operator.
524  ///
525  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_xor.png)
526  xor,
527
528  /// Sum the components of the source and destination images.
529  ///
530  /// Transparency in a pixel of one of the images reduces the contribution of
531  /// that image to the corresponding output pixel, as if the color of that
532  /// pixel in that image was darker.
533  ///
534  /// This corresponds to the "Source plus Destination" Porter-Duff operator.
535  ///
536  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_plus.png)
537  plus,
538
539  /// Multiply the color components of the source and destination images.
540  ///
541  /// This can only result in the same or darker colors (multiplying by white,
542  /// 1.0, results in no change; multiplying by black, 0.0, results in black).
543  ///
544  /// When compositing two opaque images, this has similar effect to overlapping
545  /// two transparencies on a projector.
546  ///
547  /// For a variant that also multiplies the alpha channel, consider [multiply].
548  ///
549  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_modulate.png)
550  ///
551  /// See also:
552  ///
553  ///  * [screen], which does a similar computation but inverted.
554  ///  * [overlay], which combines [modulate] and [screen] to favor the
555  ///    destination image.
556  ///  * [hardLight], which combines [modulate] and [screen] to favor the
557  ///    source image.
558  modulate,
559
560  // Following blend modes are defined in the CSS Compositing standard.
561
562  /// Multiply the inverse of the components of the source and destination
563  /// images, and inverse the result.
564  ///
565  /// Inverting the components means that a fully saturated channel (opaque
566  /// white) is treated as the value 0.0, and values normally treated as 0.0
567  /// (black, transparent) are treated as 1.0.
568  ///
569  /// This is essentially the same as [modulate] blend mode, but with the values
570  /// of the colors inverted before the multiplication and the result being
571  /// inverted back before rendering.
572  ///
573  /// This can only result in the same or lighter colors (multiplying by black,
574  /// 1.0, results in no change; multiplying by white, 0.0, results in white).
575  /// Similarly, in the alpha channel, it can only result in more opaque colors.
576  ///
577  /// This has similar effect to two projectors displaying their images on the
578  /// same screen simultaneously.
579  ///
580  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_screen.png)
581  ///
582  /// See also:
583  ///
584  ///  * [modulate], which does a similar computation but without inverting the
585  ///    values.
586  ///  * [overlay], which combines [modulate] and [screen] to favor the
587  ///    destination image.
588  ///  * [hardLight], which combines [modulate] and [screen] to favor the
589  ///    source image.
590  screen,  // The last coeff mode.
591
592  /// Multiply the components of the source and destination images after
593  /// adjusting them to favor the destination.
594  ///
595  /// Specifically, if the destination value is smaller, this multiplies it with
596  /// the source value, whereas is the source value is smaller, it multiplies
597  /// the inverse of the source value with the inverse of the destination value,
598  /// then inverts the result.
599  ///
600  /// Inverting the components means that a fully saturated channel (opaque
601  /// white) is treated as the value 0.0, and values normally treated as 0.0
602  /// (black, transparent) are treated as 1.0.
603  ///
604  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_overlay.png)
605  ///
606  /// See also:
607  ///
608  ///  * [modulate], which always multiplies the values.
609  ///  * [screen], which always multiplies the inverses of the values.
610  ///  * [hardLight], which is similar to [overlay] but favors the source image
611  ///    instead of the destination image.
612  overlay,
613
614  /// Composite the source and destination image by choosing the lowest value
615  /// from each color channel.
616  ///
617  /// The opacity of the output image is computed in the same way as for
618  /// [srcOver].
619  ///
620  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_darken.png)
621  darken,
622
623  /// Composite the source and destination image by choosing the highest value
624  /// from each color channel.
625  ///
626  /// The opacity of the output image is computed in the same way as for
627  /// [srcOver].
628  ///
629  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_lighten.png)
630  lighten,
631
632  /// Divide the destination by the inverse of the source.
633  ///
634  /// Inverting the components means that a fully saturated channel (opaque
635  /// white) is treated as the value 0.0, and values normally treated as 0.0
636  /// (black, transparent) are treated as 1.0.
637  ///
638  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_colorDodge.png)
639  colorDodge,
640
641  /// Divide the inverse of the destination by the the source, and inverse the result.
642  ///
643  /// Inverting the components means that a fully saturated channel (opaque
644  /// white) is treated as the value 0.0, and values normally treated as 0.0
645  /// (black, transparent) are treated as 1.0.
646  ///
647  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_colorBurn.png)
648  colorBurn,
649
650  /// Multiply the components of the source and destination images after
651  /// adjusting them to favor the source.
652  ///
653  /// Specifically, if the source value is smaller, this multiplies it with the
654  /// destination value, whereas is the destination value is smaller, it
655  /// multiplies the inverse of the destination value with the inverse of the
656  /// source value, then inverts the result.
657  ///
658  /// Inverting the components means that a fully saturated channel (opaque
659  /// white) is treated as the value 0.0, and values normally treated as 0.0
660  /// (black, transparent) are treated as 1.0.
661  ///
662  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_hardLight.png)
663  ///
664  /// See also:
665  ///
666  ///  * [modulate], which always multiplies the values.
667  ///  * [screen], which always multiplies the inverses of the values.
668  ///  * [overlay], which is similar to [hardLight] but favors the destination
669  ///    image instead of the source image.
670  hardLight,
671
672  /// Use [colorDodge] for source values below 0.5 and [colorBurn] for source
673  /// values above 0.5.
674  ///
675  /// This results in a similar but softer effect than [overlay].
676  ///
677  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_softLight.png)
678  ///
679  /// See also:
680  ///
681  ///  * [color], which is a more subtle tinting effect.
682  softLight,
683
684  /// Subtract the smaller value from the bigger value for each channel.
685  ///
686  /// Compositing black has no effect; compositing white inverts the colors of
687  /// the other image.
688  ///
689  /// The opacity of the output image is computed in the same way as for
690  /// [srcOver].
691  ///
692  /// The effect is similar to [exclusion] but harsher.
693  ///
694  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_difference.png)
695  difference,
696
697  /// Subtract double the product of the two images from the sum of the two
698  /// images.
699  ///
700  /// Compositing black has no effect; compositing white inverts the colors of
701  /// the other image.
702  ///
703  /// The opacity of the output image is computed in the same way as for
704  /// [srcOver].
705  ///
706  /// The effect is similar to [difference] but softer.
707  ///
708  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_exclusion.png)
709  exclusion,
710
711  /// Multiply the components of the source and destination images, including
712  /// the alpha channel.
713  ///
714  /// This can only result in the same or darker colors (multiplying by white,
715  /// 1.0, results in no change; multiplying by black, 0.0, results in black).
716  ///
717  /// Since the alpha channel is also multiplied, a fully-transparent pixel
718  /// (opacity 0.0) in one image results in a fully transparent pixel in the
719  /// output. This is similar to [dstIn], but with the colors combined.
720  ///
721  /// For a variant that multiplies the colors but does not multiply the alpha
722  /// channel, consider [modulate].
723  ///
724  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_multiply.png)
725  multiply,  // The last separable mode.
726
727  /// Take the hue of the source image, and the saturation and luminosity of the
728  /// destination image.
729  ///
730  /// The effect is to tint the destination image with the source image.
731  ///
732  /// The opacity of the output image is computed in the same way as for
733  /// [srcOver]. Regions that are entirely transparent in the source image take
734  /// their hue from the destination.
735  ///
736  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_hue.png)
737  ///
738  /// See also:
739  ///
740  ///  * [color], which is a similar but stronger effect as it also applies the
741  ///    saturation of the source image.
742  ///  * [HSVColor], which allows colors to be expressed using Hue rather than
743  ///    the red/green/blue channels of [Color].
744  hue,
745
746  /// Take the saturation of the source image, and the hue and luminosity of the
747  /// destination image.
748  ///
749  /// The opacity of the output image is computed in the same way as for
750  /// [srcOver]. Regions that are entirely transparent in the source image take
751  /// their saturation from the destination.
752  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_saturation.png)
754  ///
755  /// See also:
756  ///
757  ///  * [color], which also applies the hue of the source image.
758  ///  * [luminosity], which applies the luminosity of the source image to the
759  ///    destination.
760  saturation,
761
762  /// Take the hue and saturation of the source image, and the luminosity of the
763  /// destination image.
764  ///
765  /// The effect is to tint the destination image with the source image.
766  ///
767  /// The opacity of the output image is computed in the same way as for
768  /// [srcOver]. Regions that are entirely transparent in the source image take
769  /// their hue and saturation from the destination.
770  ///
771  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_color.png)
772  ///
773  /// See also:
774  ///
775  ///  * [hue], which is a similar but weaker effect.
776  ///  * [softLight], which is a similar tinting effect but also tints white.
777  ///  * [saturation], which only applies the saturation of the source image.
778  color,
779
780  /// Take the luminosity of the source image, and the hue and saturation of the
781  /// destination image.
782  ///
783  /// The opacity of the output image is computed in the same way as for
784  /// [srcOver]. Regions that are entirely transparent in the source image take
785  /// their luminosity from the destination.
786  ///
787  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/blend_mode_luminosity.png)
788  ///
789  /// See also:
790  ///
791  ///  * [saturation], which applies the saturation of the source image to the
792  ///    destination.
793  ///  * [ImageFilter.blur], which can be used with [BackdropFilter] for a
794  ///    related effect.
795  luminosity,
796}
797
/// Quality levels for image filters.
///
/// The levels are ordered from the fastest and lowest quality ([none]) to the
/// slowest and highest quality ([high]).
///
/// See [Paint.filterQuality].
enum FilterQuality {
  // This list comes from Skia's SkFilterQuality.h and the values (order) should
  // be kept in sync.

  /// Fastest possible filtering, albeit also the lowest quality.
  ///
  /// Typically this implies nearest-neighbor filtering.
  none,

  /// Better quality than [none], faster than [medium].
  ///
  /// Typically this implies bilinear interpolation.
  low,

  /// Better quality than [low], faster than [high].
  ///
  /// Typically this implies a combination of bilinear interpolation and
  /// pyramidal parametric pre-filtering (mipmaps).
  medium,

  /// Best possible quality filtering, albeit also the slowest.
  ///
  /// Typically this implies bicubic interpolation or better.
  high,
}
826
/// Styles to use for line endings.
///
/// A cap is drawn at the beginning and end of each contour of a stroked path.
///
/// See also:
///
///  * [Paint.strokeCap] for how this value is used.
///  * [StrokeJoin] for the different kinds of line segment joins.
// These enum values must be kept in sync with SkPaint::Cap.
enum StrokeCap {
  /// Begin and end contours with a flat edge and no extension.
  ///
  /// ![A butt cap ends line segments with a square end that stops at the end of
  /// the line segment.](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/butt_cap.png)
  ///
  /// Compare to the [square] cap, which has the same shape, but extends past
  /// the end of the line by half a stroke width.
  butt,

  /// Begin and end contours with a semi-circle extension.
  ///
  /// ![A round cap adds a rounded end to the line segment that protrudes
  /// by one half of the thickness of the line (which is the radius of the cap)
  /// past the end of the segment.](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/round_cap.png)
  ///
  /// The cap is colored in the diagram above to highlight it: in normal use it
  /// is the same color as the line.
  round,

  /// Begin and end contours with a half square extension. This is
  /// similar to extending each contour by half the stroke width (as
  /// given by [Paint.strokeWidth]).
  ///
  /// ![A square cap has a square end that effectively extends the line length
  /// by half of the stroke width.](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/square_cap.png)
  ///
  /// The cap is colored in the diagram above to highlight it: in normal use it
  /// is the same color as the line.
  ///
  /// Compare to the [butt] cap, which has the same shape, but doesn't extend
  /// past the end of the line.
  square,
}
868
/// Styles to use for line segment joins.
///
/// This only affects line joins for polygons drawn by [Canvas.drawPath] and
/// rectangles, not points drawn as lines with [Canvas.drawPoints].
///
/// See also:
///
/// * [Paint.strokeJoin] and [Paint.strokeMiterLimit] for how this value is
///   used.
/// * [StrokeCap] for the different kinds of line endings.
// These enum values must be kept in sync with SkPaint::Join.
enum StrokeJoin {
  /// Joins between line segments form sharp corners.
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/miter_4_join.mp4}
  ///
  /// The center of the line segment is colored in the diagram above to
  /// highlight the join, but in normal usage the join is the same color as the
  /// line.
  ///
  /// See also:
  ///
  ///   * [Paint.strokeJoin], used to set the line segment join style to this
  ///     value.
  ///   * [Paint.strokeMiterLimit], used to define when a miter is drawn instead
  ///     of a bevel when the join is set to this value.
  miter,

  /// Joins between line segments are semi-circular.
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/round_join.mp4}
  ///
  /// The center of the line segment is colored in the diagram above to
  /// highlight the join, but in normal usage the join is the same color as the
  /// line.
  ///
  /// See also:
  ///
  ///   * [Paint.strokeJoin], used to set the line segment join style to this
  ///     value.
  round,

  /// Joins between line segments connect the corners of the butt ends of the
  /// line segments to give a beveled appearance.
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/bevel_join.mp4}
  ///
  /// The center of the line segment is colored in the diagram above to
  /// highlight the join, but in normal usage the join is the same color as the
  /// line.
  ///
  /// See also:
  ///
  ///   * [Paint.strokeJoin], used to set the line segment join style to this
  ///     value.
  bevel,
}
926
/// Strategies for painting shapes and paths on a canvas.
///
/// See [Paint.style].
// These enum values must be kept in sync with SkPaint::Style.
enum PaintingStyle {
  // This list comes from Skia's SkPaint.h and the values (order) should be kept
  // in sync.

  /// Apply the [Paint] to the inside of the shape. For example, when
  /// applied to the [Canvas.drawCircle] call, this results in a disc
  /// of the given size being painted.
  fill,

  /// Apply the [Paint] to the edge of the shape. For example, when
  /// applied to the [Canvas.drawCircle] call, this results in a hoop
  /// of the given size being painted. The line drawn on the edge will
  /// be the width given by the [Paint.strokeWidth] property.
  stroke,
}
946
947
/// Different ways to clip a widget's content.
///
/// The options are listed in increasing order of fidelity (and cost): [none],
/// [hardEdge], [antiAlias], [antiAliasWithSaveLayer].
enum Clip {
  /// No clip at all.
  ///
  /// This is the default option for most widgets: if the content does not
  /// overflow the widget boundary, don't pay any performance cost for clipping.
  ///
  /// If the content does overflow, please explicitly specify the following
  /// [Clip] options:
  ///  * [hardEdge], which is the fastest clipping, but with lower fidelity.
  ///  * [antiAlias], which is a little slower than [hardEdge], but with smoothed edges.
  ///  * [antiAliasWithSaveLayer], which is much slower than [antiAlias], and should
  ///    rarely be used.
  none,

  /// Clip, but do not apply anti-aliasing.
  ///
  /// This mode enables clipping, but curves and non-axis-aligned straight lines will be
  /// jagged as no effort is made to anti-alias.
  ///
  /// Faster than other clipping modes, but slower than [none].
  ///
  /// This is a reasonable choice when clipping is needed, if the container is an axis-
  /// aligned rectangle or an axis-aligned rounded rectangle with very small corner radii.
  ///
  /// See also:
  ///
  ///  * [antiAlias], which is more reasonable when clipping is needed and the shape is not
  ///    an axis-aligned rectangle.
  hardEdge,

  /// Clip with anti-aliasing.
  ///
  /// This mode has anti-aliased clipping edges to achieve a smoother look.
  ///
  /// It's much faster than [antiAliasWithSaveLayer], but slower than [hardEdge].
  ///
  /// This will be the common case when dealing with circles and arcs.
  ///
  /// Different from [hardEdge] and [antiAliasWithSaveLayer], this clipping may have
  /// bleeding edge artifacts.
  /// (See https://fiddle.skia.org/c/21cb4c2b2515996b537f36e7819288ae for an example.)
  ///
  /// See also:
  ///
  ///  * [hardEdge], which is a little faster, but with lower fidelity.
  ///  * [antiAliasWithSaveLayer], which is much slower, but can avoid the
  ///    bleeding edges if there's no other way.
  ///  * [Paint.isAntiAlias], which is the anti-aliasing switch for general draw operations.
  antiAlias,

  /// Clip with anti-aliasing and saveLayer immediately following the clip.
  ///
  /// This mode not only clips with anti-aliasing, but also allocates an offscreen
  /// buffer. All subsequent paints are carried out on that buffer before finally
  /// being clipped and composited back.
  ///
  /// This is very slow. It has no bleeding edge artifacts (that [antiAlias] has)
  /// but it changes the semantics as an offscreen buffer is now introduced.
  /// (See https://github.com/flutter/flutter/issues/18057#issuecomment-394197336
  /// for a difference between paint without saveLayer and paint with saveLayer.)
  ///
  /// This will be only rarely needed. One case where you might need this is if
  /// you have an image overlaid on a very different background color. In these
  /// cases, consider whether you can avoid overlaying multiple colors in one
  /// spot (e.g. by having the background color only present where the image is
  /// absent). If you can, [antiAlias] would be fine and much faster.
  ///
  /// See also:
  ///
  ///  * [antiAlias], which is much faster, and has similar clipping results.
  antiAliasWithSaveLayer,
}
1021
// Indicates that the image should not be resized in this dimension.
//
// Used by [instantiateImageCodec] as a magical value to disable resizing
// in the given dimension. (-1 is never a valid image dimension, so it is a
// safe sentinel.)
//
// This needs to be kept in sync with "kDoNotResizeDimension" in codec.cc
const int _kDoNotResizeDimension = -1;
1029
/// A description of the style to use when drawing on a [Canvas].
///
/// Most APIs on [Canvas] take a [Paint] object to describe the style
/// to use for that operation.
class Paint {
  // Paint objects are encoded in two buffers:
  //
  // * _data is binary data in four-byte fields, each of which is either a
  //   uint32_t or a float. The default value for each field is encoded as
  //   zero to make initialization trivial. Most values already have a default
  //   value of zero, but some, such as color, have a non-zero default value.
  //   To encode or decode these values, XOR the value with the default value.
  //
  // * _objects is a list of unencodable objects, typically wrappers for native
  //   objects. The objects are simply stored in the list without any additional
  //   encoding.
  //
  // The binary format must match the deserialization code in paint.cc.

  final ByteData _data = ByteData(_kDataByteCount);
  static const int _kIsAntiAliasIndex = 0;
  static const int _kColorIndex = 1;
  static const int _kBlendModeIndex = 2;
  static const int _kStyleIndex = 3;
  static const int _kStrokeWidthIndex = 4;
  static const int _kStrokeCapIndex = 5;
  static const int _kStrokeJoinIndex = 6;
  static const int _kStrokeMiterLimitIndex = 7;
  static const int _kFilterQualityIndex = 8;
  static const int _kMaskFilterIndex = 9;
  static const int _kMaskFilterBlurStyleIndex = 10;
  static const int _kMaskFilterSigmaIndex = 11;
  static const int _kInvertColorIndex = 12;

  static const int _kIsAntiAliasOffset = _kIsAntiAliasIndex << 2;
  static const int _kColorOffset = _kColorIndex << 2;
  static const int _kBlendModeOffset = _kBlendModeIndex << 2;
  static const int _kStyleOffset = _kStyleIndex << 2;
  static const int _kStrokeWidthOffset = _kStrokeWidthIndex << 2;
  static const int _kStrokeCapOffset = _kStrokeCapIndex << 2;
  static const int _kStrokeJoinOffset = _kStrokeJoinIndex << 2;
  static const int _kStrokeMiterLimitOffset = _kStrokeMiterLimitIndex << 2;
  static const int _kFilterQualityOffset = _kFilterQualityIndex << 2;
  static const int _kMaskFilterOffset = _kMaskFilterIndex << 2;
  static const int _kMaskFilterBlurStyleOffset = _kMaskFilterBlurStyleIndex << 2;
  static const int _kMaskFilterSigmaOffset = _kMaskFilterSigmaIndex << 2;
  static const int _kInvertColorOffset = _kInvertColorIndex << 2;
  // If you add more fields, remember to update _kDataByteCount.
  static const int _kDataByteCount = 52;

  // Binary format must match the deserialization code in paint.cc.
  List<dynamic> _objects;
  static const int _kShaderIndex = 0;
  static const int _kColorFilterIndex = 1;
  static const int _kImageFilterIndex = 2;
  static const int _kObjectCount = 3; // Must be one larger than the largest index.

  /// Whether to apply anti-aliasing to lines and images drawn on the
  /// canvas.
  ///
  /// Defaults to true.
  bool get isAntiAlias {
    return _data.getInt32(_kIsAntiAliasOffset, _kFakeHostEndian) == 0;
  }
  set isAntiAlias(bool value) {
    // We encode true as zero and false as one because the default value, which
    // we always encode as zero, is true.
    final int encoded = value ? 0 : 1;
    _data.setInt32(_kIsAntiAliasOffset, encoded, _kFakeHostEndian);
  }

  // Must be kept in sync with the default in paint.cc.
  static const int _kColorDefault = 0xFF000000;

  /// The color to use when stroking or filling a shape.
  ///
  /// Defaults to opaque black.
  ///
  /// See also:
  ///
  ///  * [style], which controls whether to stroke or fill (or both).
  ///  * [colorFilter], which overrides [color].
  ///  * [shader], which overrides [color] with more elaborate effects.
  ///
  /// This color is not used when compositing. To colorize a layer, use
  /// [colorFilter].
  Color get color {
    final int encoded = _data.getInt32(_kColorOffset, _kFakeHostEndian);
    return Color(encoded ^ _kColorDefault);
  }
  set color(Color value) {
    assert(value != null);
    final int encoded = value.value ^ _kColorDefault;
    _data.setInt32(_kColorOffset, encoded, _kFakeHostEndian);
  }

  // Must be kept in sync with the default in paint.cc.
  static final int _kBlendModeDefault = BlendMode.srcOver.index;

  /// A blend mode to apply when a shape is drawn or a layer is composited.
  ///
  /// The source colors are from the shape being drawn (e.g. from
  /// [Canvas.drawPath]) or layer being composited (the graphics that were drawn
  /// between the [Canvas.saveLayer] and [Canvas.restore] calls), after applying
  /// the [colorFilter], if any.
  ///
  /// The destination colors are from the background onto which the shape or
  /// layer is being composited.
  ///
  /// Defaults to [BlendMode.srcOver].
  ///
  /// See also:
  ///
  ///  * [Canvas.saveLayer], which uses its [Paint]'s [blendMode] to composite
  ///    the layer when [restore] is called.
  ///  * [BlendMode], which discusses the use of [saveLayer] with [blendMode].
  BlendMode get blendMode {
    final int encoded = _data.getInt32(_kBlendModeOffset, _kFakeHostEndian);
    return BlendMode.values[encoded ^ _kBlendModeDefault];
  }
  set blendMode(BlendMode value) {
    assert(value != null);
    final int encoded = value.index ^ _kBlendModeDefault;
    _data.setInt32(_kBlendModeOffset, encoded, _kFakeHostEndian);
  }

  /// Whether to paint inside shapes, the edges of shapes, or both.
  ///
  /// Defaults to [PaintingStyle.fill].
  PaintingStyle get style {
    return PaintingStyle.values[_data.getInt32(_kStyleOffset, _kFakeHostEndian)];
  }
  set style(PaintingStyle value) {
    assert(value != null);
    final int encoded = value.index;
    _data.setInt32(_kStyleOffset, encoded, _kFakeHostEndian);
  }

  /// How wide to make edges drawn when [style] is set to
  /// [PaintingStyle.stroke]. The width is given in logical pixels measured in
  /// the direction orthogonal to the direction of the path.
  ///
  /// Defaults to 0.0, which corresponds to a hairline width.
  double get strokeWidth {
    return _data.getFloat32(_kStrokeWidthOffset, _kFakeHostEndian);
  }
  set strokeWidth(double value) {
    assert(value != null);
    final double encoded = value;
    _data.setFloat32(_kStrokeWidthOffset, encoded, _kFakeHostEndian);
  }

  /// The kind of finish to place on the end of lines drawn when
  /// [style] is set to [PaintingStyle.stroke].
  ///
  /// Defaults to [StrokeCap.butt], i.e. no caps.
  StrokeCap get strokeCap {
    return StrokeCap.values[_data.getInt32(_kStrokeCapOffset, _kFakeHostEndian)];
  }
  set strokeCap(StrokeCap value) {
    assert(value != null);
    final int encoded = value.index;
    _data.setInt32(_kStrokeCapOffset, encoded, _kFakeHostEndian);
  }

  /// The kind of finish to place on the joins between segments.
  ///
  /// This applies to paths drawn when [style] is set to [PaintingStyle.stroke],
  /// It does not apply to points drawn as lines with [Canvas.drawPoints].
  ///
  /// Defaults to [StrokeJoin.miter], i.e. sharp corners.
  ///
  /// Some examples of joins:
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/miter_4_join.mp4}
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/round_join.mp4}
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/bevel_join.mp4}
  ///
  /// The centers of the line segments are colored in the diagrams above to
  /// highlight the joins, but in normal usage the join is the same color as the
  /// line.
  ///
  /// See also:
  ///
  ///  * [strokeMiterLimit] to control when miters are replaced by bevels when
  ///    this is set to [StrokeJoin.miter].
  ///  * [strokeCap] to control what is drawn at the ends of the stroke.
  ///  * [StrokeJoin] for the definitive list of stroke joins.
  StrokeJoin get strokeJoin {
    return StrokeJoin.values[_data.getInt32(_kStrokeJoinOffset, _kFakeHostEndian)];
  }
  set strokeJoin(StrokeJoin value) {
    assert(value != null);
    final int encoded = value.index;
    _data.setInt32(_kStrokeJoinOffset, encoded, _kFakeHostEndian);
  }

  // Must be kept in sync with the default in paint.cc.
  static const double _kStrokeMiterLimitDefault = 4.0;

  /// The limit for miters to be drawn on segments when the join is set to
  /// [StrokeJoin.miter] and the [style] is set to [PaintingStyle.stroke]. If
  /// this limit is exceeded, then a [StrokeJoin.bevel] join will be drawn
  /// instead. This may cause some 'popping' of the corners of a path if the
  /// angle between line segments is animated, as seen in the diagrams below.
  ///
  /// This limit is expressed as a limit on the length of the miter.
  ///
  /// Defaults to 4.0.  Using zero as a limit will cause a [StrokeJoin.bevel]
  /// join to be used all the time.
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/miter_0_join.mp4}
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/miter_4_join.mp4}
  ///
  /// {@animation 300 300 https://flutter.github.io/assets-for-api-docs/assets/dart-ui/miter_6_join.mp4}
  ///
  /// The centers of the line segments are colored in the diagrams above to
  /// highlight the joins, but in normal usage the join is the same color as the
  /// line.
  ///
  /// See also:
  ///
  ///  * [strokeJoin] to control the kind of finish to place on the joins
  ///    between segments.
  ///  * [strokeCap] to control what is drawn at the ends of the stroke.
  double get strokeMiterLimit {
    // The value is stored relative to _kStrokeMiterLimitDefault (see the
    // setter below) so that the all-zero initial state of _data decodes to
    // the default of 4.0; add the default back when reading.
    return _data.getFloat32(_kStrokeMiterLimitOffset, _kFakeHostEndian) + _kStrokeMiterLimitDefault;
  }
  set strokeMiterLimit(double value) {
    assert(value != null);
    final double encoded = value - _kStrokeMiterLimitDefault;
    _data.setFloat32(_kStrokeMiterLimitOffset, encoded, _kFakeHostEndian);
  }

  /// A mask filter (for example, a blur) to apply to a shape after it has been
  /// drawn but before it has been composited into the image.
  ///
  /// See [MaskFilter] for details.
  MaskFilter get maskFilter {
    switch (_data.getInt32(_kMaskFilterOffset, _kFakeHostEndian)) {
      case MaskFilter._TypeNone:
        return null;
      case MaskFilter._TypeBlur:
        return MaskFilter.blur(
          BlurStyle.values[_data.getInt32(_kMaskFilterBlurStyleOffset, _kFakeHostEndian)],
          _data.getFloat32(_kMaskFilterSigmaOffset, _kFakeHostEndian),
        );
    }
    return null;
  }
  set maskFilter(MaskFilter value) {
    if (value == null) {
      _data.setInt32(_kMaskFilterOffset, MaskFilter._TypeNone, _kFakeHostEndian);
      _data.setInt32(_kMaskFilterBlurStyleOffset, 0, _kFakeHostEndian);
      _data.setFloat32(_kMaskFilterSigmaOffset, 0.0, _kFakeHostEndian);
    } else {
      // For now we only support one kind of MaskFilter, so we don't need to
      // check what the type is if it's not null.
      _data.setInt32(_kMaskFilterOffset, MaskFilter._TypeBlur, _kFakeHostEndian);
      _data.setInt32(_kMaskFilterBlurStyleOffset, value._style.index, _kFakeHostEndian);
      _data.setFloat32(_kMaskFilterSigmaOffset, value._sigma, _kFakeHostEndian);
    }
  }

  /// Controls the performance vs quality trade-off to use when applying
  /// filters, such as [maskFilter], or when drawing images, as with
  /// [Canvas.drawImageRect] or [Canvas.drawImageNine].
  ///
  /// Defaults to [FilterQuality.none].
  // TODO(ianh): verify that the image drawing methods actually respect this
  FilterQuality get filterQuality {
    return FilterQuality.values[_data.getInt32(_kFilterQualityOffset, _kFakeHostEndian)];
  }
  set filterQuality(FilterQuality value) {
    assert(value != null);
    final int encoded = value.index;
    _data.setInt32(_kFilterQualityOffset, encoded, _kFakeHostEndian);
  }

  /// The shader to use when stroking or filling a shape.
  ///
  /// When this is null, the [color] is used instead.
  ///
  /// See also:
  ///
  ///  * [Gradient], a shader that paints a color gradient.
  ///  * [ImageShader], a shader that tiles an [Image].
  ///  * [colorFilter], which overrides [shader].
  ///  * [color], which is used if [shader] and [colorFilter] are null.
  Shader get shader {
    if (_objects == null)
      return null;
    return _objects[_kShaderIndex];
  }
  set shader(Shader value) {
    _objects ??= List<dynamic>(_kObjectCount);
    _objects[_kShaderIndex] = value;
  }

  /// A color filter to apply when a shape is drawn or when a layer is
  /// composited.
  ///
  /// See [ColorFilter] for details.
  ///
  /// When a shape is being drawn, [colorFilter] overrides [color] and [shader].
  ColorFilter get colorFilter {
    if (_objects == null || _objects[_kColorFilterIndex] == null) {
      return null;
    }
    // _objects stores the native wrapper; surface the ColorFilter that
    // created it.
    return _objects[_kColorFilterIndex].creator;
  }

  set colorFilter(ColorFilter value) {
    final _ColorFilter nativeFilter = value?._toNativeColorFilter();
    if (nativeFilter == null) {
      if (_objects != null) {
        _objects[_kColorFilterIndex] = null;
      }
    } else {
      if (_objects == null) {
        _objects = List<dynamic>(_kObjectCount);
        _objects[_kColorFilterIndex] = nativeFilter;
      } else if (_objects[_kColorFilterIndex]?.creator != value) {
        // Only allocate a new native filter when the ColorFilter actually
        // changed.
        _objects[_kColorFilterIndex] = nativeFilter;
      }
    }
  }

  /// The [ImageFilter] to use when drawing raster images.
  ///
  /// For example, to blur an image using [Canvas.drawImage], apply an
  /// [ImageFilter.blur]:
  ///
  /// ```dart
  /// import 'dart:ui' as ui;
  ///
  /// ui.Image image;
  ///
  /// void paint(Canvas canvas, Size size) {
  ///   canvas.drawImage(
  ///     image,
  ///     Offset.zero,
  ///     Paint()..imageFilter = ui.ImageFilter.blur(sigmaX: .5, sigmaY: .5),
  ///   );
  /// }
  /// ```
  ///
  /// See also:
  ///
  ///  * [MaskFilter], which is used for drawing geometry.
  ImageFilter get imageFilter {
    if (_objects == null)
      return null;
    return _objects[_kImageFilterIndex];
  }
  set imageFilter(ImageFilter value) {
    _objects ??= List<dynamic>(_kObjectCount);
    _objects[_kImageFilterIndex] = value;
  }


  /// Whether the colors of the image are inverted when drawn.
  ///
  /// Inverting the colors of an image applies a new color filter that will
  /// be composed with any user provided color filters. This is primarily
  /// used for implementing smart invert on iOS.
  bool get invertColors {
    return _data.getInt32(_kInvertColorOffset, _kFakeHostEndian) == 1;
  }
  set invertColors(bool value) {
    _data.setInt32(_kInvertColorOffset, value ? 1 : 0, _kFakeHostEndian);
  }

  @override
  String toString() {
    final StringBuffer result = StringBuffer();
    String semicolon = '';
    result.write('Paint(');
    if (style == PaintingStyle.stroke) {
      result.write('$style');
      if (strokeWidth != 0.0)
        result.write(' ${strokeWidth.toStringAsFixed(1)}');
      else
        result.write(' hairline');
      if (strokeCap != StrokeCap.butt)
        result.write(' $strokeCap');
      if (strokeJoin == StrokeJoin.miter) {
        if (strokeMiterLimit != _kStrokeMiterLimitDefault)
          result.write(' $strokeJoin up to ${strokeMiterLimit.toStringAsFixed(1)}');
      } else {
        result.write(' $strokeJoin');
      }
      semicolon = '; ';
    }
    if (isAntiAlias != true) {
      result.write('${semicolon}antialias off');
      semicolon = '; ';
    }
    if (color != const Color(_kColorDefault)) {
      if (color != null)
        result.write('$semicolon$color');
      else
        result.write('${semicolon}no color');
      semicolon = '; ';
    }
    if (blendMode.index != _kBlendModeDefault) {
      result.write('$semicolon$blendMode');
      semicolon = '; ';
    }
    if (colorFilter != null) {
      result.write('${semicolon}colorFilter: $colorFilter');
      semicolon = '; ';
    }
    if (maskFilter != null) {
      result.write('${semicolon}maskFilter: $maskFilter');
      semicolon = '; ';
    }
    if (filterQuality != FilterQuality.none) {
      result.write('${semicolon}filterQuality: $filterQuality');
      semicolon = '; ';
    }
    if (shader != null) {
      result.write('${semicolon}shader: $shader');
      semicolon = '; ';
    }
    if (imageFilter != null) {
      result.write('${semicolon}imageFilter: $imageFilter');
      semicolon = '; ';
    }
    if (invertColors)
      result.write('${semicolon}invert: $invertColors');
    result.write(')');
    return result.toString();
  }
}
1468
/// The format in which image bytes should be returned when using
/// [Image.toByteData].
enum ImageByteFormat {
  /// Raw RGBA format.
  ///
  /// Unencoded bytes, in RGBA row-primary form, 8 bits per channel
  /// (4 bytes per pixel).
  rawRgba,

  /// Raw unmodified format.
  ///
  /// Unencoded bytes, in the image's existing format. For example, a grayscale
  /// image may use a single 8-bit channel for each pixel.
  rawUnmodified,

  /// PNG format.
  ///
  /// A lossless compression format for images. This format is well suited for
  /// images with hard edges, such as screenshots or sprites, and images with
  /// text. Transparency is supported. The PNG format supports images up to
  /// 2,147,483,647 pixels in either dimension, though in practice available
  /// memory provides a more immediate limitation on maximum image size.
  ///
  /// PNG images normally use the `.png` file extension and the `image/png` MIME
  /// type.
  ///
  /// See also:
  ///
  ///  * <https://en.wikipedia.org/wiki/Portable_Network_Graphics>, the Wikipedia page on PNG.
  ///  * <https://tools.ietf.org/rfc/rfc2083.txt>, the PNG standard.
  png,
}
1500
/// The format of pixel data given to [decodeImageFromPixels].
// The index of each value is passed to the engine via the `format` field of
// [_ImageInfo] (see [decodeImageFromPixels]), so the order of these values
// must not change without a corresponding engine-side update.
enum PixelFormat {
  /// Each pixel is 32 bits, with the highest 8 bits encoding red, the next 8
  /// bits encoding green, the next 8 bits encoding blue, and the lowest 8 bits
  /// encoding alpha.
  rgba8888,

  /// Each pixel is 32 bits, with the highest 8 bits encoding blue, the next 8
  /// bits encoding green, the next 8 bits encoding red, and the lowest 8 bits
  /// encoding alpha.
  bgra8888,
}
1513
// Bundles the dimensions and pixel layout of a raw pixel buffer for transport
// to the engine (see [_instantiateImageCodec] and [decodeImageFromPixels]).
class _ImageInfo {
  // A null [rowBytes] defaults to `width * 4`, i.e. four bytes per pixel —
  // both [PixelFormat] values are 32 bits per pixel.
  _ImageInfo(this.width, this.height, this.format, int rowBytes)
    : rowBytes = rowBytes ?? width * 4;

  /// Width of the image, in pixels. Read by the engine.
  @pragma('vm:entry-point', 'get')
  int width;

  /// Height of the image, in pixels. Read by the engine.
  @pragma('vm:entry-point', 'get')
  int height;

  /// The [PixelFormat] index describing the pixel layout. Read by the engine.
  @pragma('vm:entry-point', 'get')
  int format;

  /// Number of bytes per row of pixel data. Read by the engine.
  @pragma('vm:entry-point', 'get')
  int rowBytes;
}
1528
/// Opaque handle to raw decoded image data (pixels).
///
/// To obtain an [Image] object, use [instantiateImageCodec].
///
/// To draw an [Image], use one of the methods on the [Canvas] class, such as
/// [Canvas.drawImage].
///
/// See also:
///
///  * [Image](https://api.flutter.dev/flutter/widgets/Image-class.html), the class in the [widgets] library.
///
@pragma('vm:entry-point')
class Image extends NativeFieldWrapperClass2 {
  // This class is created by the engine, and should not be instantiated
  // or extended directly.
  //
  // To obtain an [Image] object, use [instantiateImageCodec].
  @pragma('vm:entry-point')
  Image._();

  /// The number of image pixels along the image's horizontal axis.
  int get width native 'Image_width';

  /// The number of image pixels along the image's vertical axis.
  int get height native 'Image_height';

  /// Converts the [Image] object into a byte array.
  ///
  /// The [format] argument specifies the format in which the bytes will be
  /// returned.
  ///
  /// Returns a future that completes with the binary image data or an error
  /// if encoding fails.
  Future<ByteData> toByteData({ImageByteFormat format = ImageByteFormat.rawRgba}) {
    return _futurize((_Callback<ByteData> callback) {
      return _toByteData(format.index, (Uint8List encoded) {
        // The engine may hand back null on encoding failure; the null-aware
        // accessors forward that null to the callback rather than throwing.
        callback(encoded?.buffer?.asByteData());
      });
    });
  }

  /// Returns an error message on failure, null on success.
  String _toByteData(int format, _Callback<Uint8List> callback) native 'Image_toByteData';

  /// Release the resources used by this object. The object is no longer usable
  /// after this method is called.
  void dispose() native 'Image_dispose';

  @override
  String toString() => '[$width\u00D7$height]';
}
1580
/// Callback signature for [decodeImageFromList].
///
/// Invoked with the decoded [Image] once the first frame of the provided
/// bytes has been decoded.
typedef ImageDecoderCallback = void Function(Image result);
1583
/// Information for a single frame of an animation.
///
/// To obtain an instance of the [FrameInfo] interface, see
/// [Codec.getNextFrame].
@pragma('vm:entry-point')
class FrameInfo extends NativeFieldWrapperClass2 {
  /// This class is created by the engine, and should not be instantiated
  /// or extended directly.
  ///
  /// To obtain an instance of the [FrameInfo] interface, see
  /// [Codec.getNextFrame].
  @pragma('vm:entry-point')
  FrameInfo._();

  /// The duration this frame should be shown.
  ///
  /// Durations are reported by the engine with millisecond resolution.
  Duration get duration => Duration(milliseconds: _durationMillis);
  // Raw frame duration, in milliseconds, as reported by the engine.
  int get _durationMillis native 'FrameInfo_durationMillis';

  /// The [Image] object for this frame.
  Image get image native 'FrameInfo_image';
}
1605
/// A handle to an image codec.
///
/// This class is created by the engine, and should not be instantiated
/// or extended directly.
///
/// To obtain an instance of the [Codec] interface, see
/// [instantiateImageCodec].
@pragma('vm:entry-point')
class Codec extends NativeFieldWrapperClass2 {
  //
  // This class is created by the engine, and should not be instantiated
  // or extended directly.
  //
  // To obtain an instance of the [Codec] interface, see
  // [instantiateImageCodec].
  @pragma('vm:entry-point')
  Codec._();

  /// Number of frames in this image.
  int get frameCount native 'Codec_frameCount';

  /// Number of times to repeat the animation.
  ///
  /// * 0 when the animation should be played once.
  /// * -1 for infinity repetitions.
  int get repetitionCount native 'Codec_repetitionCount';

  /// Fetches the next animation frame.
  ///
  /// Wraps back to the first frame after returning the last frame.
  ///
  /// The returned future can complete with an error if the decoding has failed.
  // NOTE(review): _getNextFrame reports failure by returning an error string;
  // _futurize (defined elsewhere in this library) presumably converts that
  // into a future that completes with an error — confirm against _futurize.
  Future<FrameInfo> getNextFrame() {
    return _futurize(_getNextFrame);
  }

  /// Returns an error message on failure, null on success.
  String _getNextFrame(_Callback<FrameInfo> callback) native 'Codec_getNextFrame';

  /// Release the resources used by this object. The object is no longer usable
  /// after this method is called.
  void dispose() native 'Codec_dispose';
}
1649
/// Instantiates an image codec [Codec] object.
///
/// [list] is the binary image data (e.g. a PNG or GIF binary data).
/// The data can be for either static or animated images. The following image
/// formats are supported: {@macro flutter.dart:ui.imageFormats}
///
/// The [targetWidth] and [targetHeight] arguments specify the size of the output
/// image, in image pixels. If they are not equal to the intrinsic dimensions of the
/// image, then the image will be scaled after being decoded. If exactly one of
/// these two arguments is specified, then the aspect ratio will be maintained
/// while forcing the image to match the specified dimension. If both are not
/// specified, then the image maintains its real size.
///
/// The returned future can complete with an error if the image decoding has
/// failed.
Future<Codec> instantiateImageCodec(Uint8List list, {
  int targetWidth,
  int targetHeight,
}) {
  // As with the other asserts in this file, this only improves the error
  // message in debug mode; the engine null-checks the argument itself.
  assert(list != null, 'Image data argument was null.');
  return _futurize(
    // A null `_ImageInfo` tells the engine that `list` holds encoded image
    // bytes rather than raw pixel data.
    (_Callback<Codec> callback) => _instantiateImageCodec(list, callback, null, targetWidth ?? _kDoNotResizeDimension, targetHeight ?? _kDoNotResizeDimension)
  );
}
1673
/// Instantiates a [Codec] object for an image binary data.
///
/// When [imageInfo] is null, [list] is interpreted as encoded image bytes
/// (this is how [instantiateImageCodec] calls it); otherwise [list] is raw
/// pixel data described by [imageInfo] (this is how [decodeImageFromPixels]
/// calls it).
///
/// The [targetWidth] and [targetHeight] arguments specify the size of the output
/// image, in image pixels. Image in this context refers to image in every frame of the [Codec].
/// If [targetWidth] and [targetHeight] are not equal to the intrinsic dimensions of the
/// image, then the image will be scaled after being decoded. If exactly one of
/// these two arguments is not equal to [_kDoNotResizeDimension], then the aspect
/// ratio will be maintained while forcing the image to match the given dimension.
/// If both are equal to [_kDoNotResizeDimension], then the image maintains its real size.
///
/// Returns an error message if the instantiation has failed, null otherwise.
String _instantiateImageCodec(Uint8List list, _Callback<Codec> callback, _ImageInfo imageInfo, int targetWidth, int targetHeight)
  native 'instantiateImageCodec';
1687
/// Loads a single image frame from a byte array into an [Image] object.
///
/// This is a convenience wrapper around [instantiateImageCodec]. Prefer using
/// [instantiateImageCodec] which also supports multi frame images.
void decodeImageFromList(Uint8List list, ImageDecoderCallback callback) {
  // Fire-and-forget: the returned future is deliberately not awaited; the
  // caller is notified of completion via `callback` instead.
  // NOTE(review): if decoding fails, the error surfaces as an unhandled async
  // error rather than through `callback` — confirm this is intended.
  _decodeImageFromListAsync(list, callback);
}
1695
// Decodes `list`, fetches the first frame, and hands the resulting [Image]
// to `callback`. Helper for [decodeImageFromList].
Future<Null> _decodeImageFromListAsync(Uint8List list,
                                       ImageDecoderCallback callback) async {
  final FrameInfo firstFrame =
      await (await instantiateImageCodec(list)).getNextFrame();
  callback(firstFrame.image);
}
1702
/// Convert an array of pixel values into an [Image] object.
///
/// [pixels] is the pixel data in the encoding described by [format].
///
/// [rowBytes] is the number of bytes consumed by each row of pixels in the
/// data buffer.  If unspecified, it defaults to [width] multiplied by the
/// number of bytes per pixel in the provided [format].
///
/// The [targetWidth] and [targetHeight] arguments specify the size of the output
/// image, in image pixels. If they are not equal to the intrinsic dimensions of the
/// image, then the image will be scaled after being decoded. If exactly one of
/// these two arguments is specified, then the aspect ratio will be maintained
/// while forcing the image to match the other given dimension. If neither is
/// specified, then the image maintains its real size.
void decodeImageFromPixels(
  Uint8List pixels,
  int width,
  int height,
  PixelFormat format,
  ImageDecoderCallback callback,
  {int rowBytes, int targetWidth, int targetHeight}
) {
  // As with the other asserts in this file, this only improves the error
  // message in debug mode; the engine null-checks the argument itself.
  assert(pixels != null, 'Pixels argument was null.');
  final _ImageInfo imageInfo = _ImageInfo(width, height, format.index, rowBytes);
  // Note: the _futurize callback parameter is named `codecCallback` to avoid
  // shadowing the `callback` parameter of this function.
  final Future<Codec> codecFuture = _futurize(
    (_Callback<Codec> codecCallback) => _instantiateImageCodec(pixels, codecCallback, imageInfo, targetWidth ?? _kDoNotResizeDimension, targetHeight ?? _kDoNotResizeDimension)
  );
  codecFuture.then((Codec codec) => codec.getNextFrame())
      .then((FrameInfo frameInfo) => callback(frameInfo.image));
}
1732
/// Determines the winding rule that decides how the interior of a [Path] is
/// calculated.
///
/// This enum is used by the [Path.fillType] property.
// The enum is marshalled to and from the engine by index (see
// [Path.fillType]), so the order of these values must not change without a
// corresponding engine-side update.
enum PathFillType {
  /// The interior is defined by a non-zero sum of signed edge crossings.
  ///
  /// For a given point, the point is considered to be on the inside of the path
  /// if a line drawn from the point to infinity crosses lines going clockwise
  /// around the point a different number of times than it crosses lines going
  /// counter-clockwise around that point.
  ///
  /// See: <https://en.wikipedia.org/wiki/Nonzero-rule>
  nonZero,

  /// The interior is defined by an odd number of edge crossings.
  ///
  /// For a given point, the point is considered to be on the inside of the path
  /// if a line drawn from the point to infinity crosses an odd number of lines.
  ///
  /// See: <https://en.wikipedia.org/wiki/Even-odd_rule>
  evenOdd,
}
1756
/// Strategies for combining paths.
///
/// See also:
///
/// * [Path.combine], which uses this enum to decide how to combine two paths.
// Must be kept in sync with SkPathOp
enum PathOperation {
  /// Subtract the second path from the first path.
  ///
  /// For example, if the two paths are overlapping circles of equal diameter
  /// but differing centers, the result would be a crescent portion of the
  /// first circle that was not overlapped by the second circle.
  ///
  /// See also:
  ///
  ///  * [reverseDifference], which is the same but subtracting the first path
  ///    from the second.
  difference,

  /// Create a new path that is the intersection of the two paths, leaving the
  /// overlapping pieces of the path.
  ///
  /// For example, if the two paths are overlapping circles of equal diameter
  /// but differing centers, the result would be only the overlapping portion
  /// of the two circles.
  ///
  /// See also:
  ///  * [xor], which is the inverse of this operation
  intersect,

  /// Create a new path that is the union (inclusive-or) of the two paths.
  ///
  /// For example, if the two paths are overlapping circles of equal diameter
  /// but differing centers, the result would be a figure-eight like shape
  /// matching the outer boundaries of both circles.
  union,

  /// Create a new path that is the exclusive-or of the two paths, leaving
  /// everything but the overlapping pieces of the path.
  ///
  /// For example, if the two paths are overlapping circles of equal diameter
  /// but differing centers, the result would be the figure-eight like shape
  /// less the overlapping parts.
  ///
  /// See also:
  ///  * [intersect], which is the inverse of this operation
  xor,

  /// Subtract the first path from the second path.
  ///
  /// For example, if the two paths are overlapping circles of equal diameter
  /// but differing centers, the result would be a crescent portion of the
  /// second circle that was not overlapped by the first circle.
  ///
  /// See also:
  ///
  ///  * [difference], which is the same but subtracting the second path
  ///    from the first.
  reverseDifference,
}
1812
/// A handle for the framework to hold and retain an engine layer across frames.
@pragma('vm:entry-point')
class EngineLayer extends NativeFieldWrapperClass2 {
  /// This class is created by the engine, and should not be instantiated
  /// or extended directly.
  // The class has no Dart-side members; the underlying layer is referenced
  // only through the native field provided by [NativeFieldWrapperClass2].
  @pragma('vm:entry-point')
  EngineLayer._();
}
1821
/// A complex, one-dimensional subset of a plane.
///
/// A path consists of a number of sub-paths, and a _current point_.
///
/// Sub-paths consist of segments of various types, such as lines,
/// arcs, or beziers. Sub-paths can be open or closed, and can
/// self-intersect.
///
/// Closed sub-paths enclose a (possibly discontiguous) region of the
/// plane based on the current [fillType].
///
/// The _current point_ is initially at the origin. After each
/// operation adding a segment to a sub-path, the current point is
/// updated to the end of that segment.
///
/// Paths can be drawn on canvases using [Canvas.drawPath], and can
/// be used to create clip regions using [Canvas.clipPath].
@pragma('vm:entry-point')
class Path extends NativeFieldWrapperClass2 {
  /// Create a new empty [Path] object.
  @pragma('vm:entry-point')
  Path() { _constructor(); }
  void _constructor() native 'Path_constructor';

  /// Creates a copy of another [Path].
  ///
  /// This copy is fast and does not require additional memory unless either
  /// the `source` path or the path returned by this constructor are modified.
  factory Path.from(Path source) {
    return source._clone();
  }
  Path _clone() native 'Path_clone';

  /// Determines how the interior of this path is calculated.
  ///
  /// Defaults to the non-zero winding rule, [PathFillType.nonZero].
  // The fill type crosses the engine boundary as the enum's index, so
  // [PathFillType]'s value order must match the engine's.
  PathFillType get fillType => PathFillType.values[_getFillType()];
  set fillType(PathFillType value) => _setFillType(value.index);

  int _getFillType() native 'Path_getFillType';
  void _setFillType(int fillType) native 'Path_setFillType';

  /// Starts a new sub-path at the given coordinate.
  void moveTo(double x, double y) native 'Path_moveTo';

  /// Starts a new sub-path at the given offset from the current point.
  void relativeMoveTo(double dx, double dy) native 'Path_relativeMoveTo';

  /// Adds a straight line segment from the current point to the given
  /// point.
  void lineTo(double x, double y) native 'Path_lineTo';

  /// Adds a straight line segment from the current point to the point
  /// at the given offset from the current point.
  void relativeLineTo(double dx, double dy) native 'Path_relativeLineTo';

  /// Adds a quadratic bezier segment that curves from the current
  /// point to the given point (x2,y2), using the control point
  /// (x1,y1).
  void quadraticBezierTo(double x1, double y1, double x2, double y2) native 'Path_quadraticBezierTo';

  /// Adds a quadratic bezier segment that curves from the current
  /// point to the point at the offset (x2,y2) from the current point,
  /// using the control point at the offset (x1,y1) from the current
  /// point.
  void relativeQuadraticBezierTo(double x1, double y1, double x2, double y2) native 'Path_relativeQuadraticBezierTo';

  /// Adds a cubic bezier segment that curves from the current point
  /// to the given point (x3,y3), using the control points (x1,y1) and
  /// (x2,y2).
  void cubicTo(double x1, double y1, double x2, double y2, double x3, double y3) native 'Path_cubicTo';

  /// Adds a cubic bezier segment that curves from the current point
  /// to the point at the offset (x3,y3) from the current point, using
  /// the control points at the offsets (x1,y1) and (x2,y2) from the
  /// current point.
  void relativeCubicTo(double x1, double y1, double x2, double y2, double x3, double y3) native 'Path_relativeCubicTo';

  /// Adds a bezier segment that curves from the current point to the
  /// given point (x2,y2), using the control points (x1,y1) and the
  /// weight w. If the weight is greater than 1, then the curve is a
  /// hyperbola; if the weight equals 1, it's a parabola; and if it is
  /// less than 1, it is an ellipse.
  void conicTo(double x1, double y1, double x2, double y2, double w) native 'Path_conicTo';

  /// Adds a bezier segment that curves from the current point to the
  /// point at the offset (x2,y2) from the current point, using the
  /// control point at the offset (x1,y1) from the current point and
  /// the weight w. If the weight is greater than 1, then the curve is
  /// a hyperbola; if the weight equals 1, it's a parabola; and if it
  /// is less than 1, it is an ellipse.
  void relativeConicTo(double x1, double y1, double x2, double y2, double w) native 'Path_relativeConicTo';

  /// If the `forceMoveTo` argument is false, adds a straight line
  /// segment and an arc segment.
  ///
  /// If the `forceMoveTo` argument is true, starts a new sub-path
  /// consisting of an arc segment.
  ///
  /// In either case, the arc segment consists of the arc that follows
  /// the edge of the oval bounded by the given rectangle, from
  /// startAngle radians around the oval up to startAngle + sweepAngle
  /// radians around the oval, with zero radians being the point on
  /// the right hand side of the oval that crosses the horizontal line
  /// that intersects the center of the rectangle and with positive
  /// angles going clockwise around the oval.
  ///
  /// The line segment added if `forceMoveTo` is false starts at the
  /// current point and ends at the start of the arc.
  void arcTo(Rect rect, double startAngle, double sweepAngle, bool forceMoveTo) {
    assert(_rectIsValid(rect));
    _arcTo(rect.left, rect.top, rect.right, rect.bottom, startAngle, sweepAngle, forceMoveTo);
  }
  void _arcTo(double left, double top, double right, double bottom,
              double startAngle, double sweepAngle, bool forceMoveTo) native 'Path_arcTo';

  /// Appends up to four conic curves weighted to describe an oval of `radius`
  /// and rotated by `rotation`.
  ///
  /// The first curve begins from the last point in the path and the last ends
  /// at `arcEnd`. The curves follow a path in a direction determined by
  /// `clockwise` and `largeArc` in such a way that the sweep angle
  /// is always less than 360 degrees.
  ///
  /// A simple line is appended if either radii are zero or the last
  /// point in the path is `arcEnd`. The radii are scaled to fit the last path
  /// point if both are greater than zero but too small to describe an arc.
  void arcToPoint(Offset arcEnd, {
    Radius radius = Radius.zero,
    double rotation = 0.0,
    bool largeArc = false,
    bool clockwise = true,
    }) {
    assert(_offsetIsValid(arcEnd));
    assert(_radiusIsValid(radius));
    _arcToPoint(arcEnd.dx, arcEnd.dy, radius.x, radius.y, rotation,
                largeArc, clockwise);
  }
  void _arcToPoint(double arcEndX, double arcEndY, double radiusX,
                   double radiusY, double rotation, bool largeArc,
                   bool clockwise) native 'Path_arcToPoint';


  /// Appends up to four conic curves weighted to describe an oval of `radius`
  /// and rotated by `rotation`.
  ///
  /// The last path point is described by (px, py).
  ///
  /// The first curve begins from the last point in the path and the last ends
  /// at `arcEndDelta.dx + px` and `arcEndDelta.dy + py`. The curves follow a
  /// path in a direction determined by `clockwise` and `largeArc`
  /// in such a way that the sweep angle is always less than 360 degrees.
  ///
  /// A simple line is appended if either radii are zero, or both
  /// `arcEndDelta.dx` and `arcEndDelta.dy` are zero. The radii are scaled to
  /// fit the last path point if both are greater than zero but too small to
  /// describe an arc.
  void relativeArcToPoint(Offset arcEndDelta, {
    Radius radius = Radius.zero,
    double rotation = 0.0,
    bool largeArc = false,
    bool clockwise = true,
    }) {
    assert(_offsetIsValid(arcEndDelta));
    assert(_radiusIsValid(radius));
    _relativeArcToPoint(arcEndDelta.dx, arcEndDelta.dy, radius.x, radius.y,
                        rotation, largeArc, clockwise);
  }
  void _relativeArcToPoint(double arcEndX, double arcEndY, double radiusX,
                           double radiusY, double rotation,
                           bool largeArc, bool clockwise)
                           native 'Path_relativeArcToPoint';

  /// Adds a new sub-path that consists of four lines that outline the
  /// given rectangle.
  void addRect(Rect rect) {
    assert(_rectIsValid(rect));
    _addRect(rect.left, rect.top, rect.right, rect.bottom);
  }
  void _addRect(double left, double top, double right, double bottom) native 'Path_addRect';

  /// Adds a new sub-path that consists of a curve that forms the
  /// ellipse that fills the given rectangle.
  ///
  /// To add a circle, pass an appropriate rectangle as `oval`. [Rect.fromCircle]
  /// can be used to easily describe the circle's center [Offset] and radius.
  void addOval(Rect oval) {
    assert(_rectIsValid(oval));
    _addOval(oval.left, oval.top, oval.right, oval.bottom);
  }
  void _addOval(double left, double top, double right, double bottom) native 'Path_addOval';

  /// Adds a new sub-path with one arc segment that consists of the arc
  /// that follows the edge of the oval bounded by the given
  /// rectangle, from startAngle radians around the oval up to
  /// startAngle + sweepAngle radians around the oval, with zero
  /// radians being the point on the right hand side of the oval that
  /// crosses the horizontal line that intersects the center of the
  /// rectangle and with positive angles going clockwise around the
  /// oval.
  void addArc(Rect oval, double startAngle, double sweepAngle) {
    assert(_rectIsValid(oval));
    _addArc(oval.left, oval.top, oval.right, oval.bottom, startAngle, sweepAngle);
  }
  void _addArc(double left, double top, double right, double bottom,
               double startAngle, double sweepAngle) native 'Path_addArc';

  /// Adds a new sub-path with a sequence of line segments that connect the given
  /// points.
  ///
  /// If `close` is true, a final line segment will be added that connects the
  /// last point to the first point.
  ///
  /// The `points` argument is interpreted as offsets from the origin.
  void addPolygon(List<Offset> points, bool close) {
    assert(points != null);
    _addPolygon(_encodePointList(points), close);
  }
  void _addPolygon(Float32List points, bool close) native 'Path_addPolygon';

  /// Adds a new sub-path that consists of the straight lines and
  /// curves needed to form the rounded rectangle described by the
  /// argument.
  void addRRect(RRect rrect) {
    assert(_rrectIsValid(rrect));
    _addRRect(rrect._value32);
  }
  void _addRRect(Float32List rrect) native 'Path_addRRect';

  /// Adds a new sub-path that consists of the given `path` offset by the given
  /// `offset`.
  ///
  /// If `matrix4` is specified, the path will be transformed by this matrix
  /// after the matrix is translated by the given offset. The matrix is a 4x4
  /// matrix stored in column major order.
  void addPath(Path path, Offset offset, {Float64List matrix4}) {
    assert(path != null); // path is checked on the engine side
    assert(_offsetIsValid(offset));
    if (matrix4 != null) {
      assert(_matrix4IsValid(matrix4));
      _addPathWithMatrix(path, offset.dx, offset.dy, matrix4);
    } else {
      _addPath(path, offset.dx, offset.dy);
    }
  }
  void _addPath(Path path, double dx, double dy) native 'Path_addPath';
  void _addPathWithMatrix(Path path, double dx, double dy, Float64List matrix) native 'Path_addPathWithMatrix';

  /// Adds the given path to this path by extending the current segment of this
  /// path with the first segment of the given path.
  ///
  /// If `matrix4` is specified, the path will be transformed by this matrix
  /// after the matrix is translated by the given `offset`.  The matrix is a 4x4
  /// matrix stored in column major order.
  void extendWithPath(Path path, Offset offset, {Float64List matrix4}) {
    assert(path != null); // path is checked on the engine side
    assert(_offsetIsValid(offset));
    if (matrix4 != null) {
      assert(_matrix4IsValid(matrix4));
      _extendWithPathAndMatrix(path, offset.dx, offset.dy, matrix4);
    } else {
      _extendWithPath(path, offset.dx, offset.dy);
    }
  }
  void _extendWithPath(Path path, double dx, double dy) native 'Path_extendWithPath';
  void _extendWithPathAndMatrix(Path path, double dx, double dy, Float64List matrix) native 'Path_extendWithPathAndMatrix';

  /// Closes the last sub-path, as if a straight line had been drawn
  /// from the current point to the first point of the sub-path.
  void close() native 'Path_close';

  /// Clears the [Path] object of all sub-paths, returning it to the
  /// same state it had when it was created. The _current point_ is
  /// reset to the origin.
  void reset() native 'Path_reset';

  /// Tests to see if the given point is within the path. (That is, whether the
  /// point would be in the visible portion of the path if the path was used
  /// with [Canvas.clipPath].)
  ///
  /// The `point` argument is interpreted as an offset from the origin.
  ///
  /// Returns true if the point is in the path, and false otherwise.
  bool contains(Offset point) {
    assert(_offsetIsValid(point));
    return _contains(point.dx, point.dy);
  }
  bool _contains(double x, double y) native 'Path_contains';

  /// Returns a copy of the path with all the segments of every
  /// sub-path translated by the given offset.
  Path shift(Offset offset) {
    assert(_offsetIsValid(offset));
    return _shift(offset.dx, offset.dy);
  }
  Path _shift(double dx, double dy) native 'Path_shift';

  /// Returns a copy of the path with all the segments of every
  /// sub-path transformed by the given matrix.
  Path transform(Float64List matrix4) {
    assert(_matrix4IsValid(matrix4));
    return _transform(matrix4);
  }
  Path _transform(Float64List matrix4) native 'Path_transform';

  /// Computes the bounding rectangle for this path.
  ///
  /// A path containing only axis-aligned points on the same straight line will
  /// have no area, and therefore `Rect.isEmpty` will return true for such a
  /// path. Consider checking `rect.width + rect.height > 0.0` instead, or
  /// using the [computeMetrics] API to check the path length.
  ///
  /// For many more elaborate paths, the bounds may be inaccurate.  For example,
  /// when a path contains a circle, the points used to compute the bounds are
  /// the circle's implied control points, which form a square around the circle;
  /// if the circle has a transformation applied using [transform] then that
  /// square is rotated, and the (axis-aligned, non-rotated) bounding box
  /// therefore ends up grossly overestimating the actual area covered by the
  /// circle.
  // see https://skia.org/user/api/SkPath_Reference#SkPath_getBounds
  Rect getBounds() {
    // The engine returns the bounds as four floats: left, top, right, bottom.
    final Float32List rect = _getBounds();
    return Rect.fromLTRB(rect[0], rect[1], rect[2], rect[3]);
  }
  Float32List _getBounds() native 'Path_getBounds';

  /// Combines the two paths according to the manner specified by the given
  /// `operation`.
  ///
  /// The resulting path will be constructed from non-overlapping contours. The
  /// curve order is reduced where possible so that cubics may be turned into
  /// quadratics, and quadratics may be turned into lines.
  static Path combine(PathOperation operation, Path path1, Path path2) {
    assert(path1 != null);
    assert(path2 != null);
    final Path path = Path();
    // _op returns false when the engine-side operation fails; the result path
    // is only returned on success.
    if (path._op(path1, path2, operation.index)) {
      return path;
    }
    throw StateError('Path.combine() failed.  This may be due an invalid path; in particular, check for NaN values.');
  }
  bool _op(Path path1, Path path2, int operation) native 'Path_op';

  /// Creates a [PathMetrics] object for this path.
  ///
  /// If `forceClosed` is set to true, the contours of the path will be measured
  /// as if they had been closed, even if they were not explicitly closed.
  PathMetrics computeMetrics({bool forceClosed = false}) {
    return PathMetrics._(this, forceClosed);
  }
}
2174
/// The geometric description of a tangent: the angle at a point.
///
/// See also:
///  * [PathMetric.getTangentForOffset], which returns the tangent of an offset along a path.
class Tangent {
  /// Creates a [Tangent] with the given values.
  ///
  /// The arguments must not be null.
  const Tangent(this.position, this.vector)
    : assert(position != null),
      assert(vector != null);

  /// Creates a [Tangent] based on the angle rather than the vector.
  ///
  /// The [vector] is computed to be the unit vector at the given angle,
  /// interpreted as clockwise radians from the x axis.
  factory Tangent.fromAngle(Offset position, double angle) {
    final Offset unitVector = Offset(math.cos(angle), math.sin(angle));
    return Tangent(position, unitVector);
  }

  /// Position of the tangent.
  ///
  /// When used with [PathMetric.getTangentForOffset], this represents the
  /// precise position that the given offset along the path corresponds to.
  final Offset position;

  /// The vector of the curve at [position].
  ///
  /// When used with [PathMetric.getTangentForOffset], this is the vector of
  /// the curve at the given offset along the path (i.e. the direction of the
  /// curve at [position]).
  final Offset vector;

  /// The direction of the curve at [position].
  ///
  /// When used with [PathMetric.getTangentForOffset], this is the angle of
  /// the curve at the given offset along the path (i.e. the direction of the
  /// curve at [position]).
  ///
  /// This value is in radians, with 0.0 meaning pointing along the x axis in
  /// the positive x-axis direction, positive numbers pointing downward toward
  /// the negative y-axis, i.e. in a clockwise direction, and negative numbers
  /// pointing upward toward the positive y-axis, i.e. in a counter-clockwise
  /// direction.
  // The sign is flipped (screen y grows downward) so that the value is
  // consistent with [Path.arcTo]'s `sweepAngle`.
  double get angle => -math.atan2(vector.dy, vector.dx);
}
2222
/// An iterable collection of [PathMetric] objects describing a [Path].
///
/// A [PathMetrics] object is created by using the [Path.computeMetrics] method,
/// and represents the path as it stood at the time of the call. Subsequent
/// modifications of the path do not affect the [PathMetrics] object.
///
/// Each path metric corresponds to a segment, or contour, of a path.
///
/// For example, a path consisting of a [Path.lineTo], a [Path.moveTo], and
/// another [Path.lineTo] will contain two contours and thus be represented by
/// two [PathMetric] objects.
///
/// When iterating across a [PathMetrics]' contours, the [PathMetric] objects are only
/// valid until the next one is obtained.
class PathMetrics extends collection.IterableBase<PathMetric> {
  // Snapshots the path into a _PathMeasure at construction time, so later
  // mutations of `path` do not affect the metrics.
  PathMetrics._(Path path, bool forceClosed) :
    _iterator = PathMetricIterator._(_PathMeasure(path, forceClosed));

  // The single iterator over the underlying measurer.
  //
  // NOTE(review): the same iterator instance is returned from every call to
  // [iterator], so this iterable can only be traversed once; a second
  // traversal resumes from the already-exhausted iterator. Confirm this
  // single-pass behavior is intentional before relying on repeated iteration.
  final Iterator<PathMetric> _iterator;

  @override
  Iterator<PathMetric> get iterator => _iterator;
}
2246
/// Tracks iteration from one segment of a path to the next for measurement.
class PathMetricIterator implements Iterator<PathMetric> {
  PathMetricIterator._(this._pathMeasure) : assert(_pathMeasure != null);

  // The metric for the contour this iterator is currently positioned on, or
  // null before the first [moveNext] call and after iteration is exhausted.
  PathMetric _pathMetric;
  _PathMeasure _pathMeasure;

  @override
  PathMetric get current => _pathMetric;

  @override
  bool moveNext() {
    // Advance the shared measurer; when no contour remains, invalidate
    // [current] and report the end of iteration.
    if (!_pathMeasure._nextContour()) {
      _pathMetric = null;
      return false;
    }
    _pathMetric = PathMetric._(_pathMeasure);
    return true;
  }
}
2267
/// Utilities for measuring a [Path] and extracting sub-paths.
///
/// Iterate over the object returned by [Path.computeMetrics] to obtain
/// [PathMetric] objects.
///
/// Once created, the methods on this class will only be valid while the
/// iterator is at the contour for which they were created. It will also only be
/// valid for the path as it was specified when [Path.computeMetrics] was called.
/// If additional contours are added or any contours are updated, the metrics
/// need to be recomputed. Previously created metrics will still refer to a
/// snapshot of the path at the time they were computed, rather than to the
/// actual metrics for the new mutations to the path.
class PathMetric {
  // Eagerly snapshots length, closedness, and index of the contour the
  // measurer is currently positioned on, so those values stay stable even
  // after the iterator advances.
  PathMetric._(this._measure)
    : assert(_measure != null),
      length = _measure.length(_measure.currentContourIndex),
      isClosed = _measure.isClosed(_measure.currentContourIndex),
      contourIndex = _measure.currentContourIndex;

  /// Return the total length of the current contour.
  final double length;

  /// Whether the contour is closed.
  ///
  /// Returns true if the contour ends with a call to [Path.close] (which may
  /// have been implied when using [Path.addRect]) or if `forceClosed` was
  /// specified as true in the call to [Path.computeMetrics].  Returns false
  /// otherwise.
  final bool isClosed;

  /// The zero-based index of the contour.
  ///
  /// [Path] objects are made up of zero or more contours. The first contour is
  /// created once a drawing command (e.g. [Path.lineTo]) is issued. A
  /// [Path.moveTo] command after a drawing command may create a new contour,
  /// although it may not if optimizations are applied that determine the move
  /// command did not actually result in moving the pen.
  ///
  /// This property is only valid with reference to its original iterator and
  /// the contours of the path at the time the path's metrics were computed. If
  /// additional contours were added or existing contours updated, this metric
  /// will be invalid for the current state of the path.
  final int contourIndex;

  // The shared measurer; queries through it are only valid while it is still
  // positioned on [contourIndex].
  final _PathMeasure _measure;

  /// Computes the position of the current contour at the given offset, and the
  /// angle of the path at that point.
  ///
  /// For example, calling this method with a distance of 1.41 for a line from
  /// 0.0,0.0 to 2.0,2.0 would give a point 1.0,1.0 and the angle 45 degrees
  /// (but in radians).
  ///
  /// Returns null if the contour has zero [length].
  ///
  /// The distance is clamped to the [length] of the current contour.
  Tangent getTangentForOffset(double distance) =>
      _measure.getTangentForOffset(contourIndex, distance);

  /// Given a start and stop distance, return the intervening segment(s).
  ///
  /// `start` and `end` are pinned to legal values (0..[length])
  /// Returns null if the segment is 0 length or `start` > `stop`.
  /// Begin the segment with a moveTo if `startWithMoveTo` is true.
  Path extractPath(double start, double end, {bool startWithMoveTo = true}) =>
      _measure.extractPath(contourIndex, start, end, startWithMoveTo: startWithMoveTo);

  @override
  String toString() => '$runtimeType{length: $length, isClosed: $isClosed, contourIndex:$contourIndex}';
}
2341
class _PathMeasure extends NativeFieldWrapperClass2 {
  // Wraps the engine's path measurer. Contours must be visited in order via
  // [_nextContour] before their measurements can be queried; the asserts
  // below enforce that ordering.
  _PathMeasure(Path path, bool forceClosed) {
    currentContourIndex = -1; // nextContour will increment this to the zero based index.
    _constructor(path, forceClosed);
  }
  void _constructor(Path path, bool forceClosed) native 'PathMeasure_constructor';

  // Returns the length of the contour at [contourIndex].
  double length(int contourIndex) {
    assert(contourIndex <= currentContourIndex, 'Iterator must be advanced before index $contourIndex can be used.');
    return _length(contourIndex);
  }
  double _length(int contourIndex) native 'PathMeasure_getLength';

  // Returns the position and tangent vector at `distance` along the contour,
  // or null when the engine reports failure.
  Tangent getTangentForOffset(int contourIndex, double distance) {
    assert(contourIndex <= currentContourIndex, 'Iterator must be advanced before index $contourIndex can be used.');
    // Buffer layout: [0] success flag, [1]..[2] position x,y,
    // [3]..[4] tangent vector x,y.
    final Float32List posTan = _getPosTan(contourIndex, distance);
    // first entry == 0 indicates that Skia returned false
    if (posTan[0] == 0.0) {
      return null;
    } else {
      return Tangent(
        Offset(posTan[1], posTan[2]),
        Offset(posTan[3], posTan[4])
      );
    }
  }
  Float32List _getPosTan(int contourIndex, double distance) native 'PathMeasure_getPosTan';

  // Returns the sub-path of the contour between the `start` and `end`
  // distances, optionally beginning with a moveTo.
  Path extractPath(int contourIndex, double start, double end, {bool startWithMoveTo = true}) {
    assert(contourIndex <= currentContourIndex, 'Iterator must be advanced before index $contourIndex can be used.');
    return _extractPath(contourIndex, start, end, startWithMoveTo: startWithMoveTo);
  }
  Path _extractPath(int contourIndex, double start, double end, {bool startWithMoveTo = true}) native 'PathMeasure_getSegment';

  // Whether the contour at [contourIndex] is closed.
  bool isClosed(int contourIndex) {
    assert(contourIndex <= currentContourIndex, 'Iterator must be advanced before index $contourIndex can be used.');
    return _isClosed(contourIndex);
  }
  bool _isClosed(int contourIndex) native 'PathMeasure_isClosed';

  // Move to the next contour in the path.
  //
  // A path can have a next contour if [Path.moveTo] was called after drawing began.
  // Return true if one exists, or false.
  bool _nextContour() {
    final bool next = _nativeNextContour();
    if (next) {
      currentContourIndex++;
    }
    return next;
  }
  bool _nativeNextContour() native 'PathMeasure_nextContour';

  // Zero-based index of the contour most recently reached by [_nextContour];
  // -1 until the first successful advance.
  int currentContourIndex;
}
2397
/// Styles to use for blurs in [MaskFilter] objects.
// These enum values must be kept in sync with SkBlurStyle.
enum BlurStyle {
  // These mirror SkBlurStyle and must be kept in sync: the engine consumes
  // the enum's `.index`, so the declaration order here is load-bearing.

  /// Fuzzy inside and outside. This is useful for painting shadows that are
  /// offset from the shape that ostensibly is casting the shadow.
  normal,

  /// Solid inside, fuzzy outside. This corresponds to drawing the shape, and
  /// additionally drawing the blur. This can make objects appear brighter,
  /// maybe even as if they were fluorescent.
  solid,

  /// Nothing inside, fuzzy outside. This is useful for painting shadows for
  /// partially transparent shapes, when they are painted separately but without
  /// an offset, so that the shadow doesn't paint below the shape.
  outer,

  /// Fuzzy inside, nothing outside. This can make shapes appear to be lit from
  /// within.
  inner,
}
2421
/// A mask filter to apply to shapes as they are painted. A mask filter is a
/// function that takes a bitmap of color pixels, and returns another bitmap of
/// color pixels.
///
/// Instances of this class are used with [Paint.maskFilter] on [Paint] objects.
class MaskFilter {
  /// Creates a mask filter that takes the shape being drawn and blurs it.
  ///
  /// This is commonly used to approximate shadows.
  ///
  /// The `style` argument controls the kind of effect to draw; see [BlurStyle].
  ///
  /// The `sigma` argument controls the size of the effect. It is the standard
  /// deviation of the Gaussian blur to apply. The value must be greater than
  /// zero. The sigma corresponds to very roughly half the radius of the effect
  /// in pixels.
  ///
  /// A blur is an expensive operation and should therefore be used sparingly.
  ///
  /// The arguments must not be null.
  ///
  /// See also:
  ///
  ///  * [Canvas.drawShadow], which is a more efficient way to draw shadows.
  const MaskFilter.blur(
    this._style,
    this._sigma,
  ) : assert(_style != null),
      assert(_sigma != null);

  // The blur style to apply; see [BlurStyle].
  final BlurStyle _style;

  // The standard deviation of the Gaussian blur.
  final double _sigma;

  // The type of MaskFilter class to create for Skia.
  // These constants must be kept in sync with MaskFilterType in paint.cc.
  static const int _TypeNone = 0; // null
  static const int _TypeBlur = 1; // SkBlurMaskFilter

  @override
  bool operator ==(dynamic other) {
    if (other is MaskFilter) {
      final MaskFilter maskFilter = other;
      return maskFilter._style == _style && maskFilter._sigma == _sigma;
    }
    return false;
  }

  @override
  int get hashCode => hashValues(_style, _sigma);

  @override
  String toString() => 'MaskFilter.blur($_style, ${_sigma.toStringAsFixed(1)})';
}
2475
/// A description of a color filter to apply when drawing a shape or compositing
/// a layer with a particular [Paint]. A color filter is a function that takes
/// two colors, and outputs one color. When applied during compositing, it is
/// independently applied to each pixel of the layer being drawn before the
/// entire layer is merged with the destination.
///
/// Instances of this class are used with [Paint.colorFilter] on [Paint]
/// objects.
class ColorFilter {
  /// Creates a color filter that applies the blend mode given as the second
  /// argument. The source color is the one given as the first argument, and the
  /// destination color is the one from the layer being composited.
  ///
  /// The output of this filter is then composited into the background according
  /// to the [Paint.blendMode], using the output of this filter as the source
  /// and the background as the destination.
  const ColorFilter.mode(Color color, BlendMode blendMode)
      : _color = color,
        _blendMode = blendMode,
        _matrix = null,
        _type = _TypeMode;

  /// Construct a color filter that transforms a color by a 4x5 matrix. The
  /// matrix is in row-major order and the translation column is specified in
  /// unnormalized, 0...255, space.
  const ColorFilter.matrix(List<double> matrix)
      : _color = null,
        _blendMode = null,
        _matrix = matrix,
        _type = _TypeMatrix;

  /// Construct a color filter that applies the sRGB gamma curve to the RGB
  /// channels.
  const ColorFilter.linearToSrgbGamma()
      : _color = null,
        _blendMode = null,
        _matrix = null,
        _type = _TypeLinearToSrgbGamma;

  /// Creates a color filter that applies the inverse of the sRGB gamma curve
  /// to the RGB channels.
  const ColorFilter.srgbToLinearGamma()
      : _color = null,
        _blendMode = null,
        _matrix = null,
        _type = _TypeSrgbToLinearGamma;

  // Only non-null for [_TypeMode] filters.
  final Color _color;
  // Only non-null for [_TypeMode] filters.
  final BlendMode _blendMode;
  // Only non-null for [_TypeMatrix] filters; expected to hold 20 entries
  // (asserted in [_toNativeColorFilter]).
  final List<double> _matrix;
  // One of the _Type* constants below; selects which native filter to build.
  final int _type;

  // The type of SkColorFilter class to create for Skia.
  // These constants must be kept in sync with ColorFilterType in paint.cc.
  static const int _TypeNone = 0; // null
  static const int _TypeMode = 1; // MakeModeFilter
  static const int _TypeMatrix = 2; // MakeMatrixFilterRowMajor255
  static const int _TypeLinearToSrgbGamma = 3; // MakeLinearToSRGBGamma
  static const int _TypeSrgbToLinearGamma = 4; // MakeSRGBToLinearGamma

  @override
  bool operator ==(dynamic other) {
    if (other is! ColorFilter) {
      return false;
    }
    final ColorFilter typedOther = other;

    if (_type != typedOther._type) {
      return false;
    }
    // Matrices are compared element-wise via _listEquals, not by identity.
    if (!_listEquals<double>(_matrix, typedOther._matrix)) {
      return false;
    }

    return _color == typedOther._color && _blendMode == typedOther._blendMode;
  }

  // Builds the native SkColorFilter wrapper for this description.
  //
  // Returns null when a required argument was null: the const constructors
  // cannot reject nulls at creation time, so the check is deferred to here.
  _ColorFilter _toNativeColorFilter() {
    switch (_type) {
      case _TypeMode:
        if (_color == null || _blendMode == null) {
          return null;
        }
        return _ColorFilter.mode(this);
      case _TypeMatrix:
        if (_matrix == null) {
          return null;
        }
        assert(_matrix.length == 20, 'Color Matrix must have 20 entries.');
        return _ColorFilter.matrix(this);
      case _TypeLinearToSrgbGamma:
        return _ColorFilter.linearToSrgbGamma(this);
      case _TypeSrgbToLinearGamma:
        return _ColorFilter.srgbToLinearGamma(this);
      default:
        throw StateError('Unknown mode $_type for ColorFilter.');
    }
  }

  @override
  int get hashCode => hashValues(_color, _blendMode, hashList(_matrix), _type);

  @override
  String toString() {
    switch (_type) {
      case _TypeMode:
        return 'ColorFilter.mode($_color, $_blendMode)';
      case _TypeMatrix:
        return 'ColorFilter.matrix($_matrix)';
      case _TypeLinearToSrgbGamma:
        return 'ColorFilter.linearToSrgbGamma()';
      case _TypeSrgbToLinearGamma:
        return 'ColorFilter.srgbToLinearGamma()';
      default:
        return 'Unknown ColorFilter type. This is an error. If you\'re seeing this, please file an issue at https://github.com/flutter/flutter/issues/new.';
    }
  }
}
2594
/// A [ColorFilter] that is backed by a native SkColorFilter.
///
/// This is a private class, rather than being the implementation of the public
/// ColorFilter, because we want ColorFilter to be const constructible and
/// efficiently comparable, so that widgets can check for ColorFilter equality to
/// avoid repainting.
class _ColorFilter extends NativeFieldWrapperClass2 {
  // Each constructor first allocates the native wrapper (_constructor) and
  // then initializes it with the values captured by [creator].
  _ColorFilter.mode(this.creator)
    : assert(creator != null),
      assert(creator._type == ColorFilter._TypeMode) {
    _constructor();
    _initMode(creator._color.value, creator._blendMode.index);
  }

  _ColorFilter.matrix(this.creator)
    : assert(creator != null),
      assert(creator._type == ColorFilter._TypeMatrix) {
    _constructor();
    // The native side takes single-precision floats, so the List<double> is
    // converted to a Float32List here.
    _initMatrix(Float32List.fromList(creator._matrix));
  }
  _ColorFilter.linearToSrgbGamma(this.creator)
    : assert(creator != null),
      assert(creator._type == ColorFilter._TypeLinearToSrgbGamma) {
    _constructor();
    _initLinearToSrgbGamma();
  }

  _ColorFilter.srgbToLinearGamma(this.creator)
    : assert(creator != null),
      assert(creator._type == ColorFilter._TypeSrgbToLinearGamma) {
    _constructor();
    _initSrgbToLinearGamma();
  }

  /// The original Dart object that created the native wrapper, which retains
  /// the values used for the filter.
  final ColorFilter creator;

  // Engine entry points registered under the 'ColorFilter_*' native names.
  void _constructor() native 'ColorFilter_constructor';
  void _initMode(int color, int blendMode) native 'ColorFilter_initMode';
  void _initMatrix(Float32List matrix) native 'ColorFilter_initMatrix';
  void _initLinearToSrgbGamma() native 'ColorFilter_initLinearToSrgbGamma';
  void _initSrgbToLinearGamma() native 'ColorFilter_initSrgbToLinearGamma';
}
2639
/// A filter operation to apply to a raster image.
///
/// See also:
///
///  * [BackdropFilter], a widget that applies [ImageFilter] to its rendering.
///  * [SceneBuilder.pushBackdropFilter], which is the low-level API for using
///    this class.
class ImageFilter extends NativeFieldWrapperClass2 {
  void _constructor() native 'ImageFilter_constructor';

  /// Creates an image filter that applies a Gaussian blur.
  ///
  /// The `sigmaX` and `sigmaY` arguments give the standard deviation of the
  /// blur along each axis; both default to 0.0.
  ImageFilter.blur({ double sigmaX = 0.0, double sigmaY = 0.0 }) {
    // The native wrapper must be allocated before it can be initialized.
    _constructor();
    _initBlur(sigmaX, sigmaY);
  }
  void _initBlur(double sigmaX, double sigmaY) native 'ImageFilter_initBlur';

  /// Creates an image filter that applies a matrix transformation.
  ///
  /// For example, applying a positive scale matrix (see [Matrix4.diagonal3])
  /// when used with [BackdropFilter] would magnify the background image.
  ///
  /// Throws an [ArgumentError] if `matrix4` does not have exactly 16 entries.
  ImageFilter.matrix(Float64List matrix4,
                     { FilterQuality filterQuality = FilterQuality.low }) {
    if (matrix4.length != 16) {
      throw ArgumentError('"matrix4" must have 16 entries.');
    }
    _constructor();
    _initMatrix(matrix4, filterQuality.index);
  }
  void _initMatrix(Float64List matrix4, int filterQuality) native 'ImageFilter_initMatrix';
}
2670
/// Base class for objects such as [Gradient] and [ImageShader] which
/// correspond to shaders as used by [Paint.shader].
class Shader extends NativeFieldWrapperClass2 {
  /// This class is created by the engine, and should not be instantiated
  /// or extended directly.
  // The vm:entry-point pragma marks this constructor as reachable from native
  // code, preventing it from being tree-shaken in AOT builds.
  @pragma('vm:entry-point')
  Shader._();
}
2679
/// Defines what happens at the edge of the gradient.
///
/// A gradient is defined along a finite inner area. In the case of a linear
/// gradient, it's between the parallel lines that are orthogonal to the line
/// drawn between two points. In the case of radial gradients, it's the disc
/// that covers the circle centered on a particular point up to a given radius.
///
/// This enum is used to define how the gradient should paint the regions
/// outside that defined inner area.
///
/// See also:
///
///  * [painting.Gradient], the superclass for [LinearGradient] and
///    [RadialGradient], as used by [BoxDecoration] et al, which works in
///    relative coordinates and can create a [Shader] representing the gradient
///    for a particular [Rect] on demand.
///  * [dart:ui.Gradient], the low-level class used when dealing with the
///    [Paint.shader] property directly, with its [Gradient.linear] and
///    [Gradient.radial] constructors.
// These enum values must be kept in sync with SkShader::TileMode.
enum TileMode {
  /// Edge is clamped to the final color.
  ///
  /// The gradient will paint all the regions outside the inner area with
  /// the color of the point closest to that region.
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_clamp_radial.png)
  clamp,

  /// Edge is repeated from first color to last.
  ///
  /// This is as if the stop points from 0.0 to 1.0 were then repeated from 1.0
  /// to 2.0, 2.0 to 3.0, and so forth (and for linear gradients, similarly from
  /// -1.0 to 0.0, -2.0 to -1.0, etc).
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_repeated_linear.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_repeated_radial.png)
  repeated,

  /// Edge is mirrored from last color to first.
  ///
  /// This is as if the stop points from 0.0 to 1.0 were then repeated backwards
  /// from 2.0 to 1.0, then forwards from 2.0 to 3.0, then backwards again from
  /// 4.0 to 3.0, and so forth (and for linear gradients, similarly from in the
  /// negative direction).
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_mirror_linear.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_mirror_radial.png)
  mirror,
}
2730
// Packs each [Color]'s 32-bit ARGB value into an Int32List for the engine.
Int32List _encodeColorList(List<Color> colors) {
  final Int32List result = Int32List(colors.length);
  for (int i = 0; i < result.length; ++i) {
    result[i] = colors[i].value;
  }
  return result;
}
2738
// Flattens a list of offsets into interleaved x,y pairs for the engine.
Float32List _encodePointList(List<Offset> points) {
  assert(points != null);
  final Float32List result = Float32List(points.length * 2);
  for (int i = 0; i < points.length; ++i) {
    final Offset point = points[i];
    assert(_offsetIsValid(point));
    // Interleave: x at even indices, y at odd indices.
    result[2 * i] = point.dx;
    result[2 * i + 1] = point.dy;
  }
  return result;
}
2753
// Packs two offsets into a four-entry [x1, y1, x2, y2] buffer for the engine.
Float32List _encodeTwoPoints(Offset pointA, Offset pointB) {
  assert(_offsetIsValid(pointA));
  assert(_offsetIsValid(pointB));
  return Float32List(4)
    ..[0] = pointA.dx
    ..[1] = pointA.dy
    ..[2] = pointB.dx
    ..[3] = pointB.dy;
}
2764
/// A shader (as used by [Paint.shader]) that renders a color gradient.
///
/// There are several types of gradients, represented by the various constructors
/// on this class.
///
/// See also:
///
///  * [Gradient](https://api.flutter.dev/flutter/painting/Gradient-class.html), the class in the [painting] library.
///
class Gradient extends Shader {

  // Allocates the native peer; must be called before any _init* method.
  void _constructor() native 'Gradient_constructor';

  /// Creates a linear gradient from `from` to `to`.
  ///
  /// If `colorStops` is provided, `colorStops[i]` is a number from 0.0 to 1.0
  /// that specifies where `color[i]` begins in the gradient. If `colorStops` is
  /// not provided, then only two stops, at 0.0 and 1.0, are implied (and
  /// `color` must therefore only have two entries).
  ///
  /// The behavior before `from` and after `to` is described by the `tileMode`
  /// argument. For details, see the [TileMode] enum.
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_clamp_linear.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_mirror_linear.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_repeated_linear.png)
  ///
  /// If `from`, `to`, `colors`, or `tileMode` are null, or if `colors` or
  /// `colorStops` contain null values, this constructor will throw a
  /// [NoSuchMethodError].
  ///
  /// If `matrix4` is provided, the gradient fill will be transformed by the
  /// specified 4x4 matrix relative to the local coordinate system. `matrix4` must
  /// be a column-major matrix packed into a list of 16 values.
  Gradient.linear(
    Offset from,
    Offset to,
    List<Color> colors, [
    List<double> colorStops,
    TileMode tileMode = TileMode.clamp,
    Float64List matrix4,
  ]) : assert(_offsetIsValid(from)),
       assert(_offsetIsValid(to)),
       assert(colors != null),
       assert(tileMode != null),
       assert(matrix4 == null || _matrix4IsValid(matrix4)),
       super._() {
    _validateColorStops(colors, colorStops);
    // Encode the endpoints, colors, and stops into typed buffers for the
    // native call.
    final Float32List endPointsBuffer = _encodeTwoPoints(from, to);
    final Int32List colorsBuffer = _encodeColorList(colors);
    final Float32List colorStopsBuffer = colorStops == null ? null : Float32List.fromList(colorStops);
    _constructor();
    _initLinear(endPointsBuffer, colorsBuffer, colorStopsBuffer, tileMode.index, matrix4);
  }
  void _initLinear(Float32List endPoints, Int32List colors, Float32List colorStops, int tileMode, Float64List matrix4) native 'Gradient_initLinear';

  /// Creates a radial gradient centered at `center` that ends at `radius`
  /// distance from the center.
  ///
  /// If `colorStops` is provided, `colorStops[i]` is a number from 0.0 to 1.0
  /// that specifies where `color[i]` begins in the gradient. If `colorStops` is
  /// not provided, then only two stops, at 0.0 and 1.0, are implied (and
  /// `color` must therefore only have two entries).
  ///
  /// The behavior before and after the radius is described by the `tileMode`
  /// argument. For details, see the [TileMode] enum.
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_clamp_radial.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_mirror_radial.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_repeated_radial.png)
  ///
  /// If `center`, `radius`, `colors`, or `tileMode` are null, or if `colors` or
  /// `colorStops` contain null values, this constructor will throw a
  /// [NoSuchMethodError].
  ///
  /// If `matrix4` is provided, the gradient fill will be transformed by the
  /// specified 4x4 matrix relative to the local coordinate system. `matrix4` must
  /// be a column-major matrix packed into a list of 16 values.
  ///
  /// If `focal` is provided and not equal to `center` and `focalRadius` is
  /// provided and not equal to 0.0, the generated shader will be a two point
  /// conical radial gradient, with `focal` being the center of the focal
  /// circle and `focalRadius` being the radius of that circle. If `focal` is
  /// provided and not equal to `center`, at least one of the two offsets must
  /// not be equal to [Offset.zero].
  Gradient.radial(
    Offset center,
    double radius,
    List<Color> colors, [
    List<double> colorStops,
    TileMode tileMode = TileMode.clamp,
    Float64List matrix4,
    Offset focal,
    double focalRadius = 0.0
  ]) : assert(_offsetIsValid(center)),
       assert(colors != null),
       assert(tileMode != null),
       assert(matrix4 == null || _matrix4IsValid(matrix4)),
       super._() {
    // Guards against an explicitly passed null (possible pre-null-safety,
    // despite the 0.0 default).
    focalRadius ??= 0.0;
    _validateColorStops(colors, colorStops);
    final Int32List colorsBuffer = _encodeColorList(colors);
    final Float32List colorStopsBuffer = colorStops == null ? null : Float32List.fromList(colorStops);

    // If focal is null or focal radius is null, this should be treated as a regular radial gradient
    // If focal == center and the focal radius is 0.0, it's still a regular radial gradient
    if (focal == null || (focal == center && focalRadius == 0.0)) {
      _constructor();
      _initRadial(center.dx, center.dy, radius, colorsBuffer, colorStopsBuffer, tileMode.index, matrix4);
    } else {
      assert(center != Offset.zero || focal != Offset.zero); // will result in exception(s) in Skia side
      _constructor();
      _initConical(focal.dx, focal.dy, focalRadius, center.dx, center.dy, radius, colorsBuffer, colorStopsBuffer, tileMode.index, matrix4);
    }
  }
  void _initRadial(double centerX, double centerY, double radius, Int32List colors, Float32List colorStops, int tileMode, Float64List matrix4) native 'Gradient_initRadial';
  void _initConical(double startX, double startY, double startRadius, double endX, double endY, double endRadius, Int32List colors, Float32List colorStops, int tileMode, Float64List matrix4) native 'Gradient_initTwoPointConical';

  /// Creates a sweep gradient centered at `center` that starts at `startAngle`
  /// and ends at `endAngle`.
  ///
  /// `startAngle` and `endAngle` should be provided in radians, with zero
  /// radians being the horizontal line to the right of the `center` and with
  /// positive angles going clockwise around the `center`.
  ///
  /// If `colorStops` is provided, `colorStops[i]` is a number from 0.0 to 1.0
  /// that specifies where `color[i]` begins in the gradient. If `colorStops` is
  /// not provided, then only two stops, at 0.0 and 1.0, are implied (and
  /// `color` must therefore only have two entries).
  ///
  /// The behavior before `startAngle` and after `endAngle` is described by the
  /// `tileMode` argument. For details, see the [TileMode] enum.
  ///
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_clamp_sweep.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_mirror_sweep.png)
  /// ![](https://flutter.github.io/assets-for-api-docs/assets/dart-ui/tile_mode_repeated_sweep.png)
  ///
  /// If `center`, `colors`, `tileMode`, `startAngle`, or `endAngle` are null,
  /// or if `colors` or `colorStops` contain null values, this constructor will
  /// throw a [NoSuchMethodError].
  ///
  /// If `matrix4` is provided, the gradient fill will be transformed by the
  /// specified 4x4 matrix relative to the local coordinate system. `matrix4` must
  /// be a column-major matrix packed into a list of 16 values.
  Gradient.sweep(
    Offset center,
    List<Color> colors, [
    List<double> colorStops,
    TileMode tileMode = TileMode.clamp,
    double startAngle = 0.0,
    double endAngle = math.pi * 2,
    Float64List matrix4,
  ]) : assert(_offsetIsValid(center)),
       assert(colors != null),
       assert(tileMode != null),
       assert(startAngle != null),
       assert(endAngle != null),
       assert(startAngle < endAngle),
       assert(matrix4 == null || _matrix4IsValid(matrix4)),
       super._() {
    _validateColorStops(colors, colorStops);
    final Int32List colorsBuffer = _encodeColorList(colors);
    final Float32List colorStopsBuffer = colorStops == null ? null : Float32List.fromList(colorStops);
    _constructor();
    _initSweep(center.dx, center.dy, colorsBuffer, colorStopsBuffer, tileMode.index, startAngle, endAngle, matrix4);
  }
  void _initSweep(double centerX, double centerY, Int32List colors, Float32List colorStops, int tileMode, double startAngle, double endAngle, Float64List matrix) native 'Gradient_initSweep';

  // Throws an [ArgumentError] unless `colors` and `colorStops` have the
  // lengths the constructors' documented contract requires.
  static void _validateColorStops(List<Color> colors, List<double> colorStops) {
    if (colorStops == null) {
      if (colors.length != 2)
        throw ArgumentError('"colors" must have length 2 if "colorStops" is omitted.');
    } else {
      if (colors.length != colorStops.length)
        throw ArgumentError('"colors" and "colorStops" arguments must have equal length.');
    }
  }
}
2943
/// A shader (as used by [Paint.shader]) that tiles an image.
class ImageShader extends Shader {
  /// Creates an image-tiling shader.
  ///
  /// The first argument specifies the image to tile. The second and third
  /// arguments specify the [TileMode] for the x direction and y direction
  /// respectively. The fourth argument gives the matrix to apply to the
  /// effect, as a column-major list of 16 values. All the arguments are
  /// required and must not be null.
  @pragma('vm:entry-point')
  ImageShader(Image image, TileMode tmx, TileMode tmy, Float64List matrix4)
    : assert(image != null), // image is checked on the engine side
      assert(tmx != null),
      assert(tmy != null),
      assert(matrix4 != null),
      super._() {
    // The length check throws (rather than asserts) so that it also applies
    // in release builds.
    if (matrix4.length != 16) {
      throw ArgumentError('"matrix4" must have 16 entries.');
    }
    _constructor();
    _initWithImage(image, tmx.index, tmy.index, matrix4);
  }

  // Allocates the native peer for this shader.
  void _constructor() native 'ImageShader_constructor';

  // Binds the image, tile modes, and transform on the native peer.
  void _initWithImage(Image image, int tmx, int tmy, Float64List matrix4) native 'ImageShader_initWithImage';
}
2966
/// Defines how a list of points is interpreted when drawing a set of triangles.
///
/// Used by [Canvas.drawVertices].
// These enum values must be kept in sync with SkVertices::VertexMode.
enum VertexMode {
  /// Draw each sequence of three points as the vertices of a triangle.
  triangles,

  /// Draw each sliding window of three points as the vertices of a triangle,
  /// so that consecutive triangles share an edge.
  triangleStrip,

  /// Draw the first point and each sliding window of two points as the vertices of a triangle.
  triangleFan,
}
2981
/// A set of vertex data used by [Canvas.drawVertices].
class Vertices extends NativeFieldWrapperClass2 {
  /// Creates a set of vertex data for use with [Canvas.drawVertices].
  ///
  /// The `mode` and `positions` parameters must not be null.
  ///
  /// If provided, the `textureCoordinates` and `colors` lists must be the
  /// same length as `positions`, and every value in `indices` must be a
  /// valid index into `positions`; otherwise an [ArgumentError] is thrown.
  Vertices(
    VertexMode mode,
    List<Offset> positions, {
    List<Offset> textureCoordinates,
    List<Color> colors,
    List<int> indices,
  }) : assert(mode != null),
       assert(positions != null) {
    if (textureCoordinates != null && textureCoordinates.length != positions.length)
      throw ArgumentError('"positions" and "textureCoordinates" lengths must match.');
    if (colors != null && colors.length != positions.length)
      throw ArgumentError('"positions" and "colors" lengths must match.');
    if (indices != null && indices.any((int i) => i < 0 || i >= positions.length))
      throw ArgumentError('"indices" values must be valid indices in the positions list.');

    // Flatten the object lists into the typed buffers the engine expects.
    final Float32List encodedPositions = _encodePointList(positions);
    final Float32List encodedTextureCoordinates = (textureCoordinates != null)
      ? _encodePointList(textureCoordinates)
      : null;
    final Int32List encodedColors = colors != null
      ? _encodeColorList(colors)
      : null;
    final Uint16List encodedIndices = indices != null
      ? Uint16List.fromList(indices)
      : null;

    _constructor();
    if (!_init(mode.index, encodedPositions, encodedTextureCoordinates, encodedColors, encodedIndices))
      throw ArgumentError('Invalid configuration for vertices.');
  }

  /// Creates a set of vertex data for use with [Canvas.drawVertices], using
  /// the encoded form expected by the engine.
  ///
  /// The `positions` list contains interleaved x,y coordinate pairs, so it
  /// describes `positions.length ~/ 2` points. If provided,
  /// `textureCoordinates` must have the same length as `positions`, `colors`
  /// must have one entry per point, and every value in `indices` must be a
  /// valid point index; otherwise an [ArgumentError] is thrown.
  Vertices.raw(
    VertexMode mode,
    Float32List positions, {
    Float32List textureCoordinates,
    Int32List colors,
    Uint16List indices,
  }) : assert(mode != null),
       assert(positions != null) {
    if (textureCoordinates != null && textureCoordinates.length != positions.length)
      throw ArgumentError('"positions" and "textureCoordinates" lengths must match.');
    if (colors != null && colors.length * 2 != positions.length)
      throw ArgumentError('"positions" and "colors" lengths must match.');
    // Each point occupies two entries of `positions` (x then y), so the valid
    // point indices are 0 .. positions.length ~/ 2 - 1. Comparing against
    // positions.length (as this code previously did) would accept
    // out-of-range point indices and let them reach the engine.
    if (indices != null && indices.any((int i) => i < 0 || i >= positions.length ~/ 2))
      throw ArgumentError('"indices" values must be valid indices in the positions list.');

    _constructor();
    if (!_init(mode.index, positions, textureCoordinates, colors, indices))
      throw ArgumentError('Invalid configuration for vertices.');
  }

  // Allocates the native peer object.
  void _constructor() native 'Vertices_constructor';

  // Initializes the native peer; returns false if the engine rejects the
  // configuration.
  bool _init(int mode,
             Float32List positions,
             Float32List textureCoordinates,
             Int32List colors,
             Uint16List indices) native 'Vertices_init';
}
3043
/// Defines how a list of points is interpreted when drawing a set of points.
///
// ignore: deprecated_member_use
/// Used by [Canvas.drawPoints].
// These enum values must be kept in sync with SkCanvas::PointMode.
enum PointMode {
  /// Draw each point separately.
  ///
  /// If the [Paint.strokeCap] is [StrokeCap.round], then each point is drawn
  /// as a circle with the diameter of the [Paint.strokeWidth], filled as
  /// described by the [Paint] (ignoring [Paint.style]).
  ///
  /// Otherwise, each point is drawn as an axis-aligned square with sides of
  /// length [Paint.strokeWidth], filled as described by the [Paint] (ignoring
  /// [Paint.style]).
  points,

  /// Draw each sequence of two points as a line segment.
  ///
  /// If the number of points is odd, then the last point is ignored.
  ///
  /// The lines are stroked as described by the [Paint] (ignoring
  /// [Paint.style]).
  lines,

  /// Draw the entire sequence of points as one line.
  ///
  /// The lines are stroked as described by the [Paint] (ignoring
  /// [Paint.style]).
  polygon,
}
3075
/// Defines how a new clip region should be merged with the existing clip
/// region.
///
/// Used by [Canvas.clipRect].
enum ClipOp {
  /// Subtract the new region from the existing region.
  difference,

  /// Intersect the new region with the existing region.
  intersect,
}
3087
3088/// An interface for recording graphical operations.
3089///
3090/// [Canvas] objects are used in creating [Picture] objects, which can
3091/// themselves be used with a [SceneBuilder] to build a [Scene]. In
3092/// normal usage, however, this is all handled by the framework.
3093///
3094/// A canvas has a current transformation matrix which is applied to all
3095/// operations. Initially, the transformation matrix is the identity transform.
3096/// It can be modified using the [translate], [scale], [rotate], [skew],
3097/// and [transform] methods.
3098///
3099/// A canvas also has a current clip region which is applied to all operations.
3100/// Initially, the clip region is infinite. It can be modified using the
3101/// [clipRect], [clipRRect], and [clipPath] methods.
3102///
3103/// The current transform and clip can be saved and restored using the stack
3104/// managed by the [save], [saveLayer], and [restore] methods.
3105class Canvas extends NativeFieldWrapperClass2 {
3106  /// Creates a canvas for recording graphical operations into the
3107  /// given picture recorder.
3108  ///
3109  /// Graphical operations that affect pixels entirely outside the given
3110  /// `cullRect` might be discarded by the implementation. However, the
3111  /// implementation might draw outside these bounds if, for example, a command
3112  /// draws partially inside and outside the `cullRect`. To ensure that pixels
3113  /// outside a given region are discarded, consider using a [clipRect]. The
3114  /// `cullRect` is optional; by default, all operations are kept.
3115  ///
3116  /// To end the recording, call [PictureRecorder.endRecording] on the
3117  /// given recorder.
3118  @pragma('vm:entry-point')
3119  Canvas(PictureRecorder recorder, [ Rect cullRect ]) : assert(recorder != null) {
3120    if (recorder.isRecording)
3121      throw ArgumentError('"recorder" must not already be associated with another Canvas.');
3122    cullRect ??= Rect.largest;
3123    _constructor(recorder, cullRect.left, cullRect.top, cullRect.right, cullRect.bottom);
3124  }
3125  void _constructor(PictureRecorder recorder,
3126                    double left,
3127                    double top,
3128                    double right,
3129                    double bottom) native 'Canvas_constructor';
3130
  /// Saves a copy of the current transform and clip on the save stack.
  ///
  /// Call [restore] to pop the save stack.
  ///
  /// See also:
  ///
  ///  * [saveLayer], which does the same thing but additionally also groups the
  ///    commands done until the matching [restore].
  ///  * [getSaveCount], which reports the current depth of the save stack.
  void save() native 'Canvas_save';
3140
3141  /// Saves a copy of the current transform and clip on the save stack, and then
3142  /// creates a new group which subsequent calls will become a part of. When the
3143  /// save stack is later popped, the group will be flattened into a layer and
3144  /// have the given `paint`'s [Paint.colorFilter] and [Paint.blendMode]
3145  /// applied.
3146  ///
3147  /// This lets you create composite effects, for example making a group of
3148  /// drawing commands semi-transparent. Without using [saveLayer], each part of
3149  /// the group would be painted individually, so where they overlap would be
3150  /// darker than where they do not. By using [saveLayer] to group them
3151  /// together, they can be drawn with an opaque color at first, and then the
3152  /// entire group can be made transparent using the [saveLayer]'s paint.
3153  ///
3154  /// Call [restore] to pop the save stack and apply the paint to the group.
3155  ///
3156  /// ## Using saveLayer with clips
3157  ///
3158  /// When a rectangular clip operation (from [clipRect]) is not axis-aligned
3159  /// with the raster buffer, or when the clip operation is not rectilinear
3160  /// (e.g. because it is a rounded rectangle clip created by [clipRRect] or an
3161  /// arbitrarily complicated path clip created by [clipPath]), the edge of the
3162  /// clip needs to be anti-aliased.
3163  ///
3164  /// If two draw calls overlap at the edge of such a clipped region, without
3165  /// using [saveLayer], the first drawing will be anti-aliased with the
3166  /// background first, and then the second will be anti-aliased with the result
3167  /// of blending the first drawing and the background. On the other hand, if
3168  /// [saveLayer] is used immediately after establishing the clip, the second
3169  /// drawing will cover the first in the layer, and thus the second alone will
3170  /// be anti-aliased with the background when the layer is clipped and
3171  /// composited (when [restore] is called).
3172  ///
3173  /// For example, this [CustomPainter.paint] method paints a clean white
3174  /// rounded rectangle:
3175  ///
3176  /// ```dart
3177  /// void paint(Canvas canvas, Size size) {
3178  ///   Rect rect = Offset.zero & size;
3179  ///   canvas.save();
3180  ///   canvas.clipRRect(new RRect.fromRectXY(rect, 100.0, 100.0));
3181  ///   canvas.saveLayer(rect, Paint());
3182  ///   canvas.drawPaint(new Paint()..color = Colors.red);
3183  ///   canvas.drawPaint(new Paint()..color = Colors.white);
3184  ///   canvas.restore();
3185  ///   canvas.restore();
3186  /// }
3187  /// ```
3188  ///
3189  /// On the other hand, this one renders a red outline, the result of the red
3190  /// paint being anti-aliased with the background at the clip edge, then the
3191  /// white paint being similarly anti-aliased with the background _including
3192  /// the clipped red paint_:
3193  ///
3194  /// ```dart
3195  /// void paint(Canvas canvas, Size size) {
3196  ///   // (this example renders poorly, prefer the example above)
3197  ///   Rect rect = Offset.zero & size;
3198  ///   canvas.save();
3199  ///   canvas.clipRRect(new RRect.fromRectXY(rect, 100.0, 100.0));
3200  ///   canvas.drawPaint(new Paint()..color = Colors.red);
3201  ///   canvas.drawPaint(new Paint()..color = Colors.white);
3202  ///   canvas.restore();
3203  /// }
3204  /// ```
3205  ///
3206  /// This point is moot if the clip only clips one draw operation. For example,
3207  /// the following paint method paints a pair of clean white rounded
3208  /// rectangles, even though the clips are not done on a separate layer:
3209  ///
3210  /// ```dart
3211  /// void paint(Canvas canvas, Size size) {
3212  ///   canvas.save();
3213  ///   canvas.clipRRect(new RRect.fromRectXY(Offset.zero & (size / 2.0), 50.0, 50.0));
3214  ///   canvas.drawPaint(new Paint()..color = Colors.white);
3215  ///   canvas.restore();
3216  ///   canvas.save();
3217  ///   canvas.clipRRect(new RRect.fromRectXY(size.center(Offset.zero) & (size / 2.0), 50.0, 50.0));
3218  ///   canvas.drawPaint(new Paint()..color = Colors.white);
3219  ///   canvas.restore();
3220  /// }
3221  /// ```
3222  ///
3223  /// (Incidentally, rather than using [clipRRect] and [drawPaint] to draw
3224  /// rounded rectangles like this, prefer the [drawRRect] method. These
3225  /// examples are using [drawPaint] as a proxy for "complicated draw operations
3226  /// that will get clipped", to illustrate the point.)
3227  ///
3228  /// ## Performance considerations
3229  ///
3230  /// Generally speaking, [saveLayer] is relatively expensive.
3231  ///
  /// There are several different hardware architectures for GPUs (graphics
3233  /// processing units, the hardware that handles graphics), but most of them
3234  /// involve batching commands and reordering them for performance. When layers
3235  /// are used, they cause the rendering pipeline to have to switch render
3236  /// target (from one layer to another). Render target switches can flush the
3237  /// GPU's command buffer, which typically means that optimizations that one
3238  /// could get with larger batching are lost. Render target switches also
3239  /// generate a lot of memory churn because the GPU needs to copy out the
3240  /// current frame buffer contents from the part of memory that's optimized for
3241  /// writing, and then needs to copy it back in once the previous render target
3242  /// (layer) is restored.
3243  ///
3244  /// See also:
3245  ///
3246  ///  * [save], which saves the current state, but does not create a new layer
3247  ///    for subsequent commands.
3248  ///  * [BlendMode], which discusses the use of [Paint.blendMode] with
3249  ///    [saveLayer].
3250  void saveLayer(Rect bounds, Paint paint) {
3251    assert(paint != null);
3252    if (bounds == null) {
3253      _saveLayerWithoutBounds(paint._objects, paint._data);
3254    } else {
3255      assert(_rectIsValid(bounds));
3256      _saveLayer(bounds.left, bounds.top, bounds.right, bounds.bottom,
3257                 paint._objects, paint._data);
3258    }
3259  }
3260  void _saveLayerWithoutBounds(List<dynamic> paintObjects, ByteData paintData)
3261      native 'Canvas_saveLayerWithoutBounds';
3262  void _saveLayer(double left,
3263                  double top,
3264                  double right,
3265                  double bottom,
3266                  List<dynamic> paintObjects,
3267                  ByteData paintData) native 'Canvas_saveLayer';
3268
  /// Pops the current save stack, if there is anything to pop.
  /// Otherwise, does nothing.
  ///
  /// Use [save] and [saveLayer] to push state onto the stack.
  ///
  /// If the state was pushed with [saveLayer], then this call will also
  /// cause the new layer to be composited into the previous layer.
  void restore() native 'Canvas_restore';

  /// Returns the number of items on the save stack, including the
  /// initial state. This means it returns 1 for a clean canvas, and
  /// that each call to [save] and [saveLayer] increments it, and that
  /// each matching call to [restore] decrements it.
  ///
  /// This number cannot go below 1.
  int getSaveCount() native 'Canvas_getSaveCount';

  /// Add a translation to the current transform, shifting the coordinate space
  /// horizontally by the first argument and vertically by the second argument.
  void translate(double dx, double dy) native 'Canvas_translate';
3289
3290  /// Add an axis-aligned scale to the current transform, scaling by the first
3291  /// argument in the horizontal direction and the second in the vertical
3292  /// direction.
3293  ///
3294  /// If [sy] is unspecified, [sx] will be used for the scale in both
3295  /// directions.
3296  void scale(double sx, [double sy]) => _scale(sx, sy ?? sx);
3297
3298  void _scale(double sx, double sy) native 'Canvas_scale';
3299
  /// Add a rotation to the current transform. The argument is in radians clockwise.
  ///
  /// See also [transform], which applies an arbitrary 4x4 matrix.
  void rotate(double radians) native 'Canvas_rotate';

  /// Add an axis-aligned skew to the current transform, with the first argument
  /// being the horizontal skew in rise over run units clockwise around the
  /// origin, and the second argument being the vertical skew in rise over run
  /// units clockwise around the origin.
  void skew(double sx, double sy) native 'Canvas_skew';
3308
  /// Multiply the current transform by the specified 4⨉4 transformation matrix
  /// specified as a list of values in column-major order.
  void transform(Float64List matrix4) {
    assert(matrix4 != null);
    // Unlike the assert above, the length check throws, so it also applies in
    // release builds.
    if (matrix4.length != 16)
      throw ArgumentError('"matrix4" must have 16 entries.');
    _transform(matrix4);
  }
  void _transform(Float64List matrix4) native 'Canvas_transform';
3318
  /// Reduces the clip region to the intersection of the current clip and the
  /// given rectangle.
  ///
  /// If [doAntiAlias] is true, then the clip will be anti-aliased.
  ///
  /// If multiple draw commands intersect with the clip boundary, this can result
  /// in incorrect blending at the clip boundary. See [saveLayer] for a
  /// discussion of how to address that.
  ///
  /// Use [ClipOp.difference] to subtract the provided rectangle from the
  /// current clip.
  void clipRect(Rect rect, { ClipOp clipOp = ClipOp.intersect, bool doAntiAlias = true }) {
    assert(_rectIsValid(rect));
    assert(clipOp != null);
    assert(doAntiAlias != null);
    // The ClipOp is passed to the engine by its enum index.
    _clipRect(rect.left, rect.top, rect.right, rect.bottom, clipOp.index, doAntiAlias);
  }
  void _clipRect(double left,
                 double top,
                 double right,
                 double bottom,
                 int clipOp,
                 bool doAntiAlias) native 'Canvas_clipRect';
3342
  /// Reduces the clip region to the intersection of the current clip and the
  /// given rounded rectangle.
  ///
  /// If [doAntiAlias] is true, then the clip will be anti-aliased.
  ///
  /// If multiple draw commands intersect with the clip boundary, this can result
  /// in incorrect blending at the clip boundary. See [saveLayer] for a
  /// discussion of how to address that and some examples of using [clipRRect].
  void clipRRect(RRect rrect, {bool doAntiAlias = true}) {
    assert(_rrectIsValid(rrect));
    assert(doAntiAlias != null);
    // _value32 is the RRect packed into a Float32List for the engine call.
    _clipRRect(rrect._value32, doAntiAlias);
  }
  void _clipRRect(Float32List rrect, bool doAntiAlias) native 'Canvas_clipRRect';
3357
  /// Reduces the clip region to the intersection of the current clip and the
  /// given [Path].
  ///
  /// If [doAntiAlias] is true, then the clip will be anti-aliased.
  ///
  /// If multiple draw commands intersect with the clip boundary, this can result
  /// in incorrect blending at the clip boundary. See [saveLayer] for a
  /// discussion of how to address that.
  void clipPath(Path path, {bool doAntiAlias = true}) {
    assert(path != null); // path is checked on the engine side
    assert(doAntiAlias != null);
    _clipPath(path, doAntiAlias);
  }
  void _clipPath(Path path, bool doAntiAlias) native 'Canvas_clipPath';
3373
  /// Paints the given [Color] onto the canvas, applying the given
  /// [BlendMode], with the given color being the source and the background
  /// being the destination.
  void drawColor(Color color, BlendMode blendMode) {
    assert(color != null);
    assert(blendMode != null);
    // The color and blend mode travel in their primitive encodings
    // (32-bit ARGB value and enum index).
    _drawColor(color.value, blendMode.index);
  }
  void _drawColor(int color, int blendMode) native 'Canvas_drawColor';

  /// Draws a line between the given points using the given paint. The line is
  /// stroked, the value of the [Paint.style] is ignored for this call.
  ///
  /// The `p1` and `p2` arguments are interpreted as offsets from the origin.
  void drawLine(Offset p1, Offset p2, Paint paint) {
    assert(_offsetIsValid(p1));
    assert(_offsetIsValid(p2));
    assert(paint != null);
    // The paint's configuration travels as its side-band object list and
    // encoded data buffer.
    _drawLine(p1.dx, p1.dy, p2.dx, p2.dy, paint._objects, paint._data);
  }
  void _drawLine(double x1,
                 double y1,
                 double x2,
                 double y2,
                 List<dynamic> paintObjects,
                 ByteData paintData) native 'Canvas_drawLine';

  /// Fills the canvas with the given [Paint].
  ///
  /// To fill the canvas with a solid color and blend mode, consider
  /// [drawColor] instead.
  void drawPaint(Paint paint) {
    assert(paint != null);
    _drawPaint(paint._objects, paint._data);
  }
  void _drawPaint(List<dynamic> paintObjects, ByteData paintData) native 'Canvas_drawPaint';
3410
  /// Draws a rectangle with the given [Paint]. Whether the rectangle is filled
  /// or stroked (or both) is controlled by [Paint.style].
  void drawRect(Rect rect, Paint paint) {
    assert(_rectIsValid(rect));
    assert(paint != null);
    // The rectangle is flattened into four doubles for the native call.
    _drawRect(rect.left, rect.top, rect.right, rect.bottom,
              paint._objects, paint._data);
  }
  void _drawRect(double left,
                 double top,
                 double right,
                 double bottom,
                 List<dynamic> paintObjects,
                 ByteData paintData) native 'Canvas_drawRect';

  /// Draws a rounded rectangle with the given [Paint]. Whether the rectangle is
  /// filled or stroked (or both) is controlled by [Paint.style].
  void drawRRect(RRect rrect, Paint paint) {
    assert(_rrectIsValid(rrect));
    assert(paint != null);
    // _value32 is the RRect packed into a Float32List for the engine call.
    _drawRRect(rrect._value32, paint._objects, paint._data);
  }
  void _drawRRect(Float32List rrect,
                  List<dynamic> paintObjects,
                  ByteData paintData) native 'Canvas_drawRRect';

  /// Draws a shape consisting of the difference between two rounded rectangles
  /// with the given [Paint]. Whether this shape is filled or stroked (or both)
  /// is controlled by [Paint.style].
  ///
  /// This shape is almost but not quite entirely unlike an annulus.
  void drawDRRect(RRect outer, RRect inner, Paint paint) {
    assert(_rrectIsValid(outer));
    assert(_rrectIsValid(inner));
    assert(paint != null);
    _drawDRRect(outer._value32, inner._value32, paint._objects, paint._data);
  }
  void _drawDRRect(Float32List outer,
                   Float32List inner,
                   List<dynamic> paintObjects,
                   ByteData paintData) native 'Canvas_drawDRRect';
3452
  /// Draws an axis-aligned oval that fills the given axis-aligned rectangle
  /// with the given [Paint]. Whether the oval is filled or stroked (or both) is
  /// controlled by [Paint.style].
  void drawOval(Rect rect, Paint paint) {
    assert(_rectIsValid(rect));
    assert(paint != null);
    _drawOval(rect.left, rect.top, rect.right, rect.bottom,
              paint._objects, paint._data);
  }
  void _drawOval(double left,
                 double top,
                 double right,
                 double bottom,
                 List<dynamic> paintObjects,
                 ByteData paintData) native 'Canvas_drawOval';

  /// Draws a circle centered at the point given by the first argument and
  /// that has the radius given by the second argument, with the [Paint] given in
  /// the third argument. Whether the circle is filled or stroked (or both) is
  /// controlled by [Paint.style].
  void drawCircle(Offset c, double radius, Paint paint) {
    assert(_offsetIsValid(c));
    assert(paint != null);
    // NOTE(review): `radius` is not NaN-checked here, unlike the center
    // offset — confirm whether that is intentional.
    _drawCircle(c.dx, c.dy, radius, paint._objects, paint._data);
  }
  void _drawCircle(double x,
                   double y,
                   double radius,
                   List<dynamic> paintObjects,
                   ByteData paintData) native 'Canvas_drawCircle';
3483
  /// Draw an arc scaled to fit inside the given rectangle. It starts from
  /// startAngle radians around the oval up to startAngle + sweepAngle
  /// radians around the oval, with zero radians being the point on
  /// the right hand side of the oval that crosses the horizontal line
  /// that intersects the center of the rectangle and with positive
  /// angles going clockwise around the oval. If useCenter is true, the arc is
  /// closed back to the center, forming a circle sector. Otherwise, the arc is
  /// not closed, forming a circle segment.
  ///
  /// This method is optimized for drawing arcs and should be faster than [Path.arcTo].
  void drawArc(Rect rect, double startAngle, double sweepAngle, bool useCenter, Paint paint) {
    assert(_rectIsValid(rect));
    assert(paint != null);
    // NOTE(review): startAngle/sweepAngle are not NaN-checked, unlike the
    // rect — confirm whether that is intentional.
    _drawArc(rect.left, rect.top, rect.right, rect.bottom, startAngle,
             sweepAngle, useCenter, paint._objects, paint._data);
  }
  void _drawArc(double left,
                double top,
                double right,
                double bottom,
                double startAngle,
                double sweepAngle,
                bool useCenter,
                List<dynamic> paintObjects,
                ByteData paintData) native 'Canvas_drawArc';

  /// Draws the given [Path] with the given [Paint]. Whether this shape is
  /// filled or stroked (or both) is controlled by [Paint.style]. If the path is
  /// filled, then sub-paths within it are implicitly closed (see [Path.close]).
  void drawPath(Path path, Paint paint) {
    assert(path != null); // path is checked on the engine side
    assert(paint != null);
    _drawPath(path, paint._objects, paint._data);
  }
  void _drawPath(Path path,
                 List<dynamic> paintObjects,
                 ByteData paintData) native 'Canvas_drawPath';
3521
  /// Draws the given [Image] into the canvas with its top-left corner at the
  /// given [Offset]. The image is composited into the canvas using the given [Paint].
  void drawImage(Image image, Offset p, Paint paint) {
    assert(image != null); // image is checked on the engine side
    assert(_offsetIsValid(p));
    assert(paint != null);
    _drawImage(image, p.dx, p.dy, paint._objects, paint._data);
  }
  void _drawImage(Image image,
                  double x,
                  double y,
                  List<dynamic> paintObjects,
                  ByteData paintData) native 'Canvas_drawImage';

  /// Draws the subset of the given image described by the `src` argument into
  /// the canvas in the axis-aligned rectangle given by the `dst` argument.
  ///
  /// This might sample from outside the `src` rect by up to half the width of
  /// an applied filter.
  ///
  /// Multiple calls to this method with different arguments (from the same
  /// image) can be batched into a single call to [drawAtlas] to improve
  /// performance.
  void drawImageRect(Image image, Rect src, Rect dst, Paint paint) {
    assert(image != null); // image is checked on the engine side
    assert(_rectIsValid(src));
    assert(_rectIsValid(dst));
    assert(paint != null);
    // The source and destination rectangles are flattened into eight doubles
    // for the native call.
    _drawImageRect(image,
                   src.left,
                   src.top,
                   src.right,
                   src.bottom,
                   dst.left,
                   dst.top,
                   dst.right,
                   dst.bottom,
                   paint._objects,
                   paint._data);
  }
  void _drawImageRect(Image image,
                      double srcLeft,
                      double srcTop,
                      double srcRight,
                      double srcBottom,
                      double dstLeft,
                      double dstTop,
                      double dstRight,
                      double dstBottom,
                      List<dynamic> paintObjects,
                      ByteData paintData) native 'Canvas_drawImageRect';
3573
  /// Draws the given [Image] into the canvas using the given [Paint].
  ///
  /// The image is drawn in nine portions described by splitting the image by
  /// drawing two horizontal lines and two vertical lines, where the `center`
  /// argument describes the rectangle formed by the four points where these
  /// four lines intersect each other. (This forms a 3-by-3 grid of regions,
  /// the center region being described by the `center` argument.)
  ///
  /// The four regions in the corners are drawn, without scaling, in the four
  /// corners of the destination rectangle described by `dst`. The remaining
  /// five regions are drawn by stretching them to fit such that they exactly
  /// cover the destination rectangle while maintaining their relative
  /// positions.
  void drawImageNine(Image image, Rect center, Rect dst, Paint paint) {
    assert(image != null); // image is checked on the engine side
    assert(_rectIsValid(center)); // rejects null rects and NaN coordinates (debug only)
    assert(_rectIsValid(dst)); // rejects null rects and NaN coordinates (debug only)
    assert(paint != null);
    // Both rects are decomposed into eight primitive doubles because the
    // native entry point takes scalars rather than Dart objects.
    _drawImageNine(image,
                   center.left,
                   center.top,
                   center.right,
                   center.bottom,
                   dst.left,
                   dst.top,
                   dst.right,
                   dst.bottom,
                   paint._objects,
                   paint._data);
  }
  // Engine entry point backing [drawImageNine]; implemented in native code.
  void _drawImageNine(Image image,
                      double centerLeft,
                      double centerTop,
                      double centerRight,
                      double centerBottom,
                      double dstLeft,
                      double dstTop,
                      double dstRight,
                      double dstBottom,
                      List<dynamic> paintObjects,
                      ByteData paintData) native 'Canvas_drawImageNine';
3615
  /// Draw the given picture onto the canvas. To create a picture, see
  /// [PictureRecorder].
  void drawPicture(Picture picture) {
    assert(picture != null); // picture is checked on the engine side
    _drawPicture(picture);
  }
  // Engine entry point backing [drawPicture]; implemented in native code.
  void _drawPicture(Picture picture) native 'Canvas_drawPicture';
3623
  /// Draws the text in the given [Paragraph] into this canvas at the given
  /// [Offset].
  ///
  /// The [Paragraph] object must have had [Paragraph.layout] called on it
  /// first.
  ///
  /// To align the text, set the `textAlign` on the [ParagraphStyle] object
  /// passed to the [new ParagraphBuilder] constructor. For more details see
  /// [TextAlign] and the discussion at [new ParagraphStyle].
  ///
  /// If the text is left aligned or justified, the left margin will be at the
  /// position specified by the `offset` argument's [Offset.dx] coordinate.
  ///
  /// If the text is right aligned or justified, the right margin will be at the
  /// position described by adding the [ParagraphConstraints.width] given to
  /// [Paragraph.layout], to the `offset` argument's [Offset.dx] coordinate.
  ///
  /// If the text is centered, the centering axis will be at the position
  /// described by adding half of the [ParagraphConstraints.width] given to
  /// [Paragraph.layout], to the `offset` argument's [Offset.dx] coordinate.
  void drawParagraph(Paragraph paragraph, Offset offset) {
    assert(paragraph != null);
    assert(_offsetIsValid(offset)); // rejects null offsets and NaN coordinates (debug only)
    // Painting is delegated to the paragraph, which records itself into this
    // canvas at the given position.
    paragraph._paint(this, offset.dx, offset.dy);
  }
3649
  /// Draws a sequence of points according to the given [PointMode].
  ///
  /// The `points` argument is interpreted as offsets from the origin.
  ///
  /// See also:
  ///
  ///  * [drawRawPoints], which takes `points` as a [Float32List] rather than a
  ///    [List<Offset>].
  void drawPoints(PointMode pointMode, List<Offset> points, Paint paint) {
    assert(pointMode != null);
    assert(points != null);
    assert(paint != null);
    // The offsets are flattened into a Float32List of interleaved x,y pairs
    // before crossing into the engine; see also [drawRawPoints].
    _drawPoints(paint._objects, paint._data, pointMode.index, _encodePointList(points));
  }
3664
  /// Draws a sequence of points according to the given [PointMode].
  ///
  /// The `points` argument is interpreted as a list of pairs of floating point
  /// numbers, where each pair represents an x and y offset from the origin.
  ///
  /// See also:
  ///
  ///  * [drawPoints], which takes `points` as a [List<Offset>] rather than a
  ///    [Float32List].
  void drawRawPoints(PointMode pointMode, Float32List points, Paint paint) {
    assert(pointMode != null);
    assert(points != null);
    assert(paint != null);
    // Each point consumes two floats (x, y), so an odd-length list cannot
    // represent a valid sequence of points.
    if (points.length % 2 != 0)
      throw ArgumentError('"points" must have an even number of values.');
    _drawPoints(paint._objects, paint._data, pointMode.index, points);
  }

  // Engine entry point backing [drawPoints] and [drawRawPoints]; `points` is
  // interpreted as interleaved x,y coordinate pairs.
  void _drawPoints(List<dynamic> paintObjects,
                   ByteData paintData,
                   int pointMode,
                   Float32List points) native 'Canvas_drawPoints';
3687
  /// Draws the given [Vertices] onto the canvas using the given [Paint] and
  /// [BlendMode].
  ///
  /// The arguments must not be null.
  void drawVertices(Vertices vertices, BlendMode blendMode, Paint paint) {
    assert(vertices != null); // vertices is checked on the engine side
    assert(paint != null);
    assert(blendMode != null);
    _drawVertices(vertices, blendMode.index, paint._objects, paint._data);
  }
  // Engine entry point backing [drawVertices]; implemented in native code.
  void _drawVertices(Vertices vertices,
                     int blendMode,
                     List<dynamic> paintObjects,
                     ByteData paintData) native 'Canvas_drawVertices';
3698
3699  //
3700  // See also:
3701  //
3702  //  * [drawRawAtlas], which takes its arguments as typed data lists rather
3703  //    than objects.
3704  void drawAtlas(Image atlas,
3705                 List<RSTransform> transforms,
3706                 List<Rect> rects,
3707                 List<Color> colors,
3708                 BlendMode blendMode,
3709                 Rect cullRect,
3710                 Paint paint) {
3711    assert(atlas != null); // atlas is checked on the engine side
3712    assert(transforms != null);
3713    assert(rects != null);
3714    assert(colors != null);
3715    assert(blendMode != null);
3716    assert(paint != null);
3717
3718    final int rectCount = rects.length;
3719    if (transforms.length != rectCount)
3720      throw ArgumentError('"transforms" and "rects" lengths must match.');
3721    if (colors.isNotEmpty && colors.length != rectCount)
3722      throw ArgumentError('If non-null, "colors" length must match that of "transforms" and "rects".');
3723
3724    final Float32List rstTransformBuffer = Float32List(rectCount * 4);
3725    final Float32List rectBuffer = Float32List(rectCount * 4);
3726
3727    for (int i = 0; i < rectCount; ++i) {
3728      final int index0 = i * 4;
3729      final int index1 = index0 + 1;
3730      final int index2 = index0 + 2;
3731      final int index3 = index0 + 3;
3732      final RSTransform rstTransform = transforms[i];
3733      final Rect rect = rects[i];
3734      assert(_rectIsValid(rect));
3735      rstTransformBuffer[index0] = rstTransform.scos;
3736      rstTransformBuffer[index1] = rstTransform.ssin;
3737      rstTransformBuffer[index2] = rstTransform.tx;
3738      rstTransformBuffer[index3] = rstTransform.ty;
3739      rectBuffer[index0] = rect.left;
3740      rectBuffer[index1] = rect.top;
3741      rectBuffer[index2] = rect.right;
3742      rectBuffer[index3] = rect.bottom;
3743    }
3744
3745    final Int32List colorBuffer = colors.isEmpty ? null : _encodeColorList(colors);
3746    final Float32List cullRectBuffer = cullRect?._value32;
3747
3748    _drawAtlas(
3749      paint._objects, paint._data, atlas, rstTransformBuffer, rectBuffer,
3750      colorBuffer, blendMode.index, cullRectBuffer
3751    );
3752  }
3753
3754  //
3755  // The `rstTransforms` argument is interpreted as a list of four-tuples, with
3756  // each tuple being ([RSTransform.scos], [RSTransform.ssin],
3757  // [RSTransform.tx], [RSTransform.ty]).
3758  //
3759  // The `rects` argument is interpreted as a list of four-tuples, with each
3760  // tuple being ([Rect.left], [Rect.top], [Rect.right], [Rect.bottom]).
3761  //
3762  // The `colors` argument, which can be null, is interpreted as a list of
3763  // 32-bit colors, with the same packing as [Color.value].
3764  //
3765  // See also:
3766  //
3767  //  * [drawAtlas], which takes its arguments as objects rather than typed
3768  //    data lists.
3769  void drawRawAtlas(Image atlas,
3770                    Float32List rstTransforms,
3771                    Float32List rects,
3772                    Int32List colors,
3773                    BlendMode blendMode,
3774                    Rect cullRect,
3775                    Paint paint) {
3776    assert(atlas != null); // atlas is checked on the engine side
3777    assert(rstTransforms != null);
3778    assert(rects != null);
3779    assert(colors != null);
3780    assert(blendMode != null);
3781    assert(paint != null);
3782
3783    final int rectCount = rects.length;
3784    if (rstTransforms.length != rectCount)
3785      throw ArgumentError('"rstTransforms" and "rects" lengths must match.');
3786    if (rectCount % 4 != 0)
3787      throw ArgumentError('"rstTransforms" and "rects" lengths must be a multiple of four.');
3788    if (colors != null && colors.length * 4 != rectCount)
3789      throw ArgumentError('If non-null, "colors" length must be one fourth the length of "rstTransforms" and "rects".');
3790
3791    _drawAtlas(
3792      paint._objects, paint._data, atlas, rstTransforms, rects,
3793      colors, blendMode.index, cullRect?._value32
3794    );
3795  }
3796
  // Engine entry point backing [drawAtlas] and [drawRawAtlas]; implemented in
  // native code. `colors` and `cullRect` may be null.
  void _drawAtlas(List<dynamic> paintObjects,
                  ByteData paintData,
                  Image atlas,
                  Float32List rstTransforms,
                  Float32List rects,
                  Int32List colors,
                  int blendMode,
                  Float32List cullRect) native 'Canvas_drawAtlas';
3805
  /// Draws a shadow for a [Path] representing the given material elevation.
  ///
  /// The `transparentOccluder` argument should be true if the occluding object
  /// is not opaque.
  ///
  /// The arguments must not be null.
  void drawShadow(Path path, Color color, double elevation, bool transparentOccluder) {
    assert(path != null); // path is checked on the engine side
    assert(color != null);
    assert(transparentOccluder != null);
    // NOTE(review): `elevation` is not null- or NaN-checked here, unlike the
    // numeric arguments of the other draw methods in this class.
    // Only the 32-bit color value crosses into the engine.
    _drawShadow(path, color.value, elevation, transparentOccluder);
  }
  // Engine entry point backing [drawShadow]; implemented in native code.
  void _drawShadow(Path path,
                   int color,
                   double elevation,
                   bool transparentOccluder) native 'Canvas_drawShadow';
3822}
3823
/// An object representing a sequence of recorded graphical operations.
///
/// To create a [Picture], use a [PictureRecorder].
///
/// A [Picture] can be placed in a [Scene] using a [SceneBuilder], via
/// the [SceneBuilder.addPicture] method. A [Picture] can also be
/// drawn into a [Canvas], using the [Canvas.drawPicture] method.
@pragma('vm:entry-point')
class Picture extends NativeFieldWrapperClass2 {
  /// This class is created by the engine, and should not be instantiated
  /// or extended directly.
  ///
  /// To create a [Picture], use a [PictureRecorder].
  @pragma('vm:entry-point')
  Picture._();

  /// Creates an image from this picture.
  ///
  /// The picture is rasterized using the number of pixels specified by the
  /// given width and height.
  ///
  /// Although the image is returned synchronously, the picture is actually
  /// rasterized the first time the image is drawn and then cached.
  Future<Image> toImage(int width, int height) {
    // Reject non-positive dimensions before crossing into the engine.
    if (width <= 0 || height <= 0)
      throw Exception('Invalid image dimensions.');
    return _futurize(
      (_Callback<Image> callback) => _toImage(width, height, callback)
    );
  }

  // Engine entry point backing [toImage]. Returns an error message, or null
  // on success; see [_futurize] for the protocol.
  String _toImage(int width, int height, _Callback<Image> callback) native 'Picture_toImage';

  /// Release the resources used by this object. The object is no longer usable
  /// after this method is called.
  void dispose() native 'Picture_dispose';

  /// Returns the approximate number of bytes allocated for this object.
  ///
  /// The actual size of this picture may be larger, particularly if it contains
  /// references to image or other large objects.
  int get approximateBytesUsed native 'Picture_GetAllocationSize';
}
3867
/// Records a [Picture] containing a sequence of graphical operations.
///
/// To begin recording, construct a [Canvas] to record the commands.
/// To end recording, use the [PictureRecorder.endRecording] method.
class PictureRecorder extends NativeFieldWrapperClass2 {
  /// Creates a new idle PictureRecorder. To associate it with a
  /// [Canvas] and begin recording, pass this [PictureRecorder] to the
  /// [Canvas] constructor.
  @pragma('vm:entry-point')
  PictureRecorder() { _constructor(); }
  // Allocates the native peer wrapped by this object.
  void _constructor() native 'PictureRecorder_constructor';

  /// Whether this object is currently recording commands.
  ///
  /// Specifically, this returns true if a [Canvas] object has been
  /// created to record commands and recording has not yet ended via a
  /// call to [endRecording], and false if either this
  /// [PictureRecorder] has not yet been associated with a [Canvas],
  /// or the [endRecording] method has already been called.
  bool get isRecording native 'PictureRecorder_isRecording';

  /// Finishes recording graphical operations.
  ///
  /// Returns a picture containing the graphical operations that have been
  /// recorded thus far. After calling this function, both the picture recorder
  /// and the canvas objects are invalid and cannot be used further.
  ///
  /// Returns null if the PictureRecorder is not associated with a canvas.
  Picture endRecording() native 'PictureRecorder_endRecording';
}
3898
/// A single shadow.
///
/// Multiple shadows are stacked together in a [TextStyle].
class Shadow {
  /// Construct a shadow.
  ///
  /// The default shadow is a black shadow with zero offset and zero blur.
  /// Default shadows should be completely covered by the casting element,
  /// and not be visible.
  ///
  /// Transparency should be adjusted through the [color] alpha.
  ///
  /// Shadow order matters due to compositing multiple translucent objects not
  /// being commutative.
  const Shadow({
    this.color = const Color(_kColorDefault),
    this.offset = Offset.zero,
    this.blurRadius = 0.0,
  }) : assert(color != null, 'Text shadow color was null.'),
       assert(offset != null, 'Text shadow offset was null.'),
       assert(blurRadius >= 0.0, 'Text shadow blur radius should be non-negative.');

  // Fully opaque black: the default shadow color.
  static const int _kColorDefault = 0xFF000000;
  // Constants for shadow encoding.
  //
  // Each encoded shadow occupies four 32-bit slots (color, x offset, y
  // offset, blur), hence 16 bytes per shadow; the `<< 2` expressions below
  // are the byte offsets of those slots within one encoded shadow.
  static const int _kBytesPerShadow = 16;
  static const int _kColorOffset = 0 << 2;
  static const int _kXOffset = 1 << 2;
  static const int _kYOffset = 2 << 2;
  static const int _kBlurOffset = 3 << 2;

  /// Color that the shadow will be drawn with.
  ///
  /// The shadows are shapes composited directly over the base canvas, and do not
  /// represent optical occlusion.
  final Color color;

  /// The displacement of the shadow from the casting element.
  ///
  /// Positive x/y offsets will shift the shadow to the right and down, while
  /// negative offsets shift the shadow to the left and up. The offsets are
  /// relative to the position of the element that is casting it.
  final Offset offset;

  /// The standard deviation of the Gaussian to convolve with the shadow's shape.
  final double blurRadius;

  /// Converts a blur radius in pixels to sigmas.
  ///
  /// See the sigma argument to [MaskFilter.blur].
  ///
  // See SkBlurMask::ConvertRadiusToSigma().
  // <https://github.com/google/skia/blob/bb5b77db51d2e149ee66db284903572a5aac09be/src/effects/SkBlurMask.cpp#L23>
  static double convertRadiusToSigma(double radius) {
    return radius * 0.57735 + 0.5;
  }

  /// The [blurRadius] in sigmas instead of logical pixels.
  ///
  /// See the sigma argument to [MaskFilter.blur].
  double get blurSigma => convertRadiusToSigma(blurRadius);

  /// Create the [Paint] object that corresponds to this shadow description.
  ///
  /// The [offset] is not represented in the [Paint] object.
  /// To honor this as well, the shape should be translated by [offset] before
  /// being filled using this [Paint].
  ///
  /// This class does not provide a way to disable shadows to avoid
  /// inconsistencies in shadow blur rendering, primarily as a method of
  /// reducing test flakiness. [toPaint] should be overridden in subclasses to
  /// provide this functionality.
  Paint toPaint() {
    return Paint()
      ..color = color
      ..maskFilter = MaskFilter.blur(BlurStyle.normal, blurSigma);
  }

  /// Returns a new shadow with its [offset] and [blurRadius] scaled by the given
  /// factor.
  ///
  /// The [color] is left unchanged.
  Shadow scale(double factor) {
    return Shadow(
      color: color,
      offset: offset * factor,
      blurRadius: blurRadius * factor,
    );
  }

  /// Linearly interpolate between two shadows.
  ///
  /// If either shadow is null, this function linearly interpolates from a
  /// a shadow that matches the other shadow in color but has a zero
  /// offset and a zero blurRadius.
  ///
  /// {@template dart.ui.shadow.lerp}
  /// The `t` argument represents position on the timeline, with 0.0 meaning
  /// that the interpolation has not started, returning `a` (or something
  /// equivalent to `a`), 1.0 meaning that the interpolation has finished,
  /// returning `b` (or something equivalent to `b`), and values in between
  /// meaning that the interpolation is at the relevant point on the timeline
  /// between `a` and `b`. The interpolation can be extrapolated beyond 0.0 and
  /// 1.0, so negative values and values greater than 1.0 are valid (and can
  /// easily be generated by curves such as [Curves.elasticInOut]).
  ///
  /// Values for `t` are usually obtained from an [Animation<double>], such as
  /// an [AnimationController].
  /// {@endtemplate}
  static Shadow lerp(Shadow a, Shadow b, double t) {
    assert(t != null);
    if (a == null && b == null)
      return null;
    // Lerping against a missing endpoint degenerates to scaling the other
    // shadow's offset and blur toward or away from zero.
    if (a == null)
      return b.scale(t);
    if (b == null)
      return a.scale(1.0 - t);
    return Shadow(
      color: Color.lerp(a.color, b.color, t),
      offset: Offset.lerp(a.offset, b.offset, t),
      blurRadius: lerpDouble(a.blurRadius, b.blurRadius, t),
    );
  }

  /// Linearly interpolate between two lists of shadows.
  ///
  /// If the lists differ in length, excess items are lerped with null.
  ///
  /// {@macro dart.ui.shadow.lerp}
  static List<Shadow> lerpList(List<Shadow> a, List<Shadow> b, double t) {
    assert(t != null);
    if (a == null && b == null)
      return null;
    a ??= <Shadow>[];
    b ??= <Shadow>[];
    final List<Shadow> result = <Shadow>[];
    final int commonLength = math.min(a.length, b.length);
    for (int i = 0; i < commonLength; i += 1)
      result.add(Shadow.lerp(a[i], b[i], t));
    // Excess items are scaled exactly as [lerp] would scale them against a
    // null counterpart.
    for (int i = commonLength; i < a.length; i += 1)
      result.add(a[i].scale(1.0 - t));
    for (int i = commonLength; i < b.length; i += 1)
      result.add(b[i].scale(t));
    return result;
  }

  @override
  bool operator ==(dynamic other) {
    if (identical(this, other))
      return true;
    if (other is! Shadow)
      return false;
    final Shadow typedOther = other;
    return color == typedOther.color &&
           offset == typedOther.offset &&
           blurRadius == typedOther.blurRadius;
  }

  @override
  int get hashCode => hashValues(color, offset, blurRadius);

  // Serialize [shadows] into ByteData. The format is _kBytesPerShadow bytes
  // for each shadow, with no header; the buffer length implies the count.
  // Null entries in [shadows] leave their 16-byte slot zero-filled.
  static ByteData _encodeShadows(List<Shadow> shadows) {
    if (shadows == null)
      return ByteData(0);

    final int byteCount = shadows.length * _kBytesPerShadow;
    final ByteData shadowsData = ByteData(byteCount);

    int shadowOffset = 0;
    for (int shadowIndex = 0; shadowIndex < shadows.length; ++shadowIndex) {
      final Shadow shadow = shadows[shadowIndex];
      if (shadow == null)
        continue; // slot stays zero-filled
      shadowOffset = shadowIndex * _kBytesPerShadow;

      // The color is XOR-ed with the default, presumably so that a zeroed
      // slot decodes to the default color — engine-side decode not visible
      // here; confirm against the native consumer.
      shadowsData.setInt32(_kColorOffset + shadowOffset,
        shadow.color.value ^ Shadow._kColorDefault, _kFakeHostEndian);

      shadowsData.setFloat32(_kXOffset + shadowOffset,
        shadow.offset.dx, _kFakeHostEndian);

      shadowsData.setFloat32(_kYOffset + shadowOffset,
        shadow.offset.dy, _kFakeHostEndian);

      shadowsData.setFloat32(_kBlurOffset + shadowOffset,
        shadow.blurRadius, _kFakeHostEndian);
    }

    return shadowsData;
  }

  // NOTE(review): prints 'TextShadow' rather than 'Shadow'; kept as-is since
  // callers or tests may depend on the exact string.
  @override
  String toString() => 'TextShadow($color, $offset, $blurRadius)';
}
4093
/// Generic callback signature, used by [_futurize].
typedef _Callback<T> = void Function(T result);

/// Signature for a method that receives a [_Callback].
///
/// Return value should be null on success, and a string error message on
/// failure.
typedef _Callbacker<T> = String Function(_Callback<T> callback);

/// Wraps a callback-taking method so that its result can be consumed as a
/// [Future].
///
/// The `callbacker` is invoked immediately with a callback that completes the
/// returned future. If `callbacker` returns a non-null [String], that string
/// is treated as an error message and an [Exception] is thrown synchronously.
///
/// If the callback is later invoked with null, the future completes with an
/// error.
///
/// Example usage:
///
/// ```dart
/// typedef IntCallback = void Function(int result);
///
/// String _doSomethingAndCallback(IntCallback callback) {
///   Timer(new Duration(seconds: 1), () { callback(1); });
/// }
///
/// Future<int> doSomething() {
///   return _futurize(_doSomethingAndCallback);
/// }
/// ```
Future<T> _futurize<T>(_Callbacker<T> callbacker) {
  // A sync completer delivers the result in the same turn the callback fires,
  // avoiding an extra event-loop hop.
  final Completer<T> completer = Completer<T>.sync();
  void onResult(T result) {
    if (result != null) {
      completer.complete(result);
    } else {
      completer.completeError(Exception('operation failed'));
    }
  }
  final String errorMessage = callbacker(onResult);
  if (errorMessage != null)
    throw Exception(errorMessage);
  return completer.future;
}
4137