1 // Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3 use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5 use core::fmt;
6 use core::iter::{Product, Sum};
7 use core::{f32, ops::*};
8
9 use core::arch::aarch64::*;
10
11 #[repr(C)]
12 union UnionCast {
13 a: [f32; 4],
14 v: Vec3A,
15 }
16
17 /// Creates a 3-dimensional vector.
18 #[inline(always)]
19 #[must_use]
20 pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
21 Vec3A::new(x, y, z)
22 }
23
24 /// A 3-dimensional vector.
25 ///
26 /// SIMD vector types are used for storage on supported platforms for better
27 /// performance than the [`Vec3`] type.
28 ///
29 /// It is possible to convert between [`Vec3`] and [`Vec3A`] types using [`From`]
30 /// or [`Into`] trait implementations.
31 ///
32 /// This type is 16 byte aligned.
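///
/// # Example
///
/// A minimal conversion sketch (editorial addition, not generated from the template),
/// showing the [`From`] round trip between [`Vec3`] and [`Vec3A`]:
///
/// ```
/// use glam::{Vec3, Vec3A};
/// let v = Vec3::new(1.0, 2.0, 3.0);
/// let a = Vec3A::from(v);       // widen to the 16 byte aligned SIMD type
/// assert_eq!(Vec3::from(a), v); // and back, losslessly
/// ```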
33 #[derive(Clone, Copy)]
34 #[repr(transparent)]
35 pub struct Vec3A(pub(crate) float32x4_t);
36
37 impl Vec3A {
38 /// All zeroes.
39 pub const ZERO: Self = Self::splat(0.0);
40
41 /// All ones.
42 pub const ONE: Self = Self::splat(1.0);
43
44 /// All negative ones.
45 pub const NEG_ONE: Self = Self::splat(-1.0);
46
47 /// All `f32::MIN`.
48 pub const MIN: Self = Self::splat(f32::MIN);
49
50 /// All `f32::MAX`.
51 pub const MAX: Self = Self::splat(f32::MAX);
52
53 /// All `f32::NAN`.
54 pub const NAN: Self = Self::splat(f32::NAN);
55
56 /// All `f32::INFINITY`.
57 pub const INFINITY: Self = Self::splat(f32::INFINITY);
58
59 /// All `f32::NEG_INFINITY`.
60 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
61
62 /// A unit vector pointing along the positive X axis.
63 pub const X: Self = Self::new(1.0, 0.0, 0.0);
64
65 /// A unit vector pointing along the positive Y axis.
66 pub const Y: Self = Self::new(0.0, 1.0, 0.0);
67
68 /// A unit vector pointing along the positive Z axis.
69 pub const Z: Self = Self::new(0.0, 0.0, 1.0);
70
71 /// A unit vector pointing along the negative X axis.
72 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);
73
74 /// A unit vector pointing along the negative Y axis.
75 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);
76
77 /// A unit vector pointing along the negative Z axis.
78 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);
79
80 /// The unit axes.
81 pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
82
83 /// Creates a new vector.
84 #[inline(always)]
85 #[must_use]
86 pub const fn new(x: f32, y: f32, z: f32) -> Self {
87 unsafe { UnionCast { a: [x, y, z, z] }.v }
88 }
89
90 /// Creates a vector with all elements set to `v`.
91 #[inline]
92 #[must_use]
93 pub const fn splat(v: f32) -> Self {
94 unsafe { UnionCast { a: [v; 4] }.v }
95 }
96
97 /// Returns a vector containing each element of `self` modified by a mapping function `f`.
98 #[inline]
99 #[must_use]
100 pub fn map<F>(self, f: F) -> Self
101 where
102 F: Fn(f32) -> f32,
103 {
104 Self::new(f(self.x), f(self.y), f(self.z))
105 }
106
107 /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
108 /// for each element of `self`.
109 ///
110 /// A true element in the mask uses the corresponding element from `if_true`, and false
111 /// uses the element from `if_false`.
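///
/// # Example
///
/// A minimal sketch (editorial addition) of per-element selection with a [`BVec3A`] mask:
///
/// ```
/// use glam::{BVec3A, Vec3A};
/// let mask = BVec3A::new(true, false, true);
/// let v = Vec3A::select(mask, Vec3A::splat(1.0), Vec3A::splat(0.0));
/// assert_eq!(v, Vec3A::new(1.0, 0.0, 1.0));
/// ```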
112 #[inline]
113 #[must_use]
114 pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
115 Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
116 }
117
118 /// Creates a new vector from an array.
119 #[inline]
120 #[must_use]
121 pub const fn from_array(a: [f32; 3]) -> Self {
122 Self::new(a[0], a[1], a[2])
123 }
124
125 /// `[x, y, z]`
126 #[inline]
127 #[must_use]
128 pub const fn to_array(&self) -> [f32; 3] {
129 unsafe { *(self as *const Vec3A as *const [f32; 3]) }
130 }
131
132 /// Creates a vector from the first 3 values in `slice`.
133 ///
134 /// # Panics
135 ///
136 /// Panics if `slice` is less than 3 elements long.
137 #[inline]
138 #[must_use]
139 pub const fn from_slice(slice: &[f32]) -> Self {
140 assert!(slice.len() >= 3);
141 Self::new(slice[0], slice[1], slice[2])
142 }
143
144 /// Writes the elements of `self` to the first 3 elements in `slice`.
145 ///
146 /// # Panics
147 ///
148 /// Panics if `slice` is less than 3 elements long.
149 #[inline]
150 pub fn write_to_slice(self, slice: &mut [f32]) {
151 slice[..3].copy_from_slice(&self.to_array());
152 }
153
154 /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of `self` discarding `w`.
155 ///
156 /// On architectures where SIMD is supported such as SSE2 on `x86_64` this conversion is a noop.
157 #[inline]
158 #[must_use]
159 pub fn from_vec4(v: Vec4) -> Self {
160 Self(v.0)
161 }
162
163 /// Creates a 4D vector from `self` and the given `w` value.
164 #[inline]
165 #[must_use]
166 pub fn extend(self, w: f32) -> Vec4 {
167 Vec4::new(self.x, self.y, self.z, w)
168 }
169
170 /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`.
171 ///
172 /// Truncation may also be performed by using [`self.xy()`][crate::swizzles::Vec3Swizzles::xy()].
173 #[inline]
174 #[must_use]
175 pub fn truncate(self) -> Vec2 {
176 use crate::swizzles::Vec3Swizzles;
177 self.xy()
178 }
179
180 /// Creates a 3D vector from `self` with the given value of `x`.
181 #[inline]
182 #[must_use]
183 pub fn with_x(mut self, x: f32) -> Self {
184 self.x = x;
185 self
186 }
187
188 /// Creates a 3D vector from `self` with the given value of `y`.
189 #[inline]
190 #[must_use]
191 pub fn with_y(mut self, y: f32) -> Self {
192 self.y = y;
193 self
194 }
195
196 /// Creates a 3D vector from `self` with the given value of `z`.
197 #[inline]
198 #[must_use]
199 pub fn with_z(mut self, z: f32) -> Self {
200 self.z = z;
201 self
202 }
203
204 /// Computes the dot product of `self` and `rhs`.
205 #[inline]
206 #[must_use]
207 pub fn dot(self, rhs: Self) -> f32 {
208 // this was faster than intrinsics in testing
209 (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
210 }
211
212 /// Returns a vector where every component is the dot product of `self` and `rhs`.
213 #[inline]
214 #[must_use]
215 pub fn dot_into_vec(self, rhs: Self) -> Self {
216 Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
217 }
218
219 /// Computes the cross product of `self` and `rhs`.
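///
/// # Example
///
/// A minimal sketch (editorial addition) using the right-handed basis vectors:
///
/// ```
/// use glam::Vec3A;
/// assert_eq!(Vec3A::X.cross(Vec3A::Y), Vec3A::Z);
/// ```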
220 #[inline]
221 #[must_use]
222 pub fn cross(self, rhs: Self) -> Self {
223 unsafe {
224 // Implementation taken from Realtime Math
225 let lhs = self.0;
226 let rhs = rhs.0;
227 // cross(a, b) = (a.yzx * b.zxy) - (a.zxy * b.yzx)
228 let lhs_yzwx = vextq_f32(lhs, lhs, 1);
229 let rhs_wxyz = vextq_f32(rhs, rhs, 3);
230
231 let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
232 let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);
233
234 // part_a = (a.yzx * b.zxy)
235 let part_a = vmulq_f32(lhs_yzx, rhs_zxy);
236
237 let lhs_wxyz = vextq_f32(lhs, lhs, 3);
238 let rhs_yzwx = vextq_f32(rhs, rhs, 1);
239 let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
240 let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);
241
242 // result = part_a - (a.zxy * b.yzx)
243 let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
244 Self(result)
245 }
246 }
247
248 /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
249 ///
250 /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
251 #[inline]
252 #[must_use]
253 pub fn min(self, rhs: Self) -> Self {
254 Self(unsafe { vminq_f32(self.0, rhs.0) })
255 }
256
257 /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
258 ///
259 /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
260 #[inline]
261 #[must_use]
262 pub fn max(self, rhs: Self) -> Self {
263 Self(unsafe { vmaxq_f32(self.0, rhs.0) })
264 }
265
266 /// Component-wise clamping of values, similar to [`f32::clamp`].
267 ///
268 /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
269 ///
270 /// # Panics
271 ///
272 /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
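///
/// # Example
///
/// A minimal sketch (editorial addition) clamping each component into `[0, 1]`:
///
/// ```
/// use glam::Vec3A;
/// let v = Vec3A::new(-1.0, 0.5, 2.0).clamp(Vec3A::ZERO, Vec3A::ONE);
/// assert_eq!(v, Vec3A::new(0.0, 0.5, 1.0));
/// ```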
273 #[inline]
274 #[must_use]
275 pub fn clamp(self, min: Self, max: Self) -> Self {
276 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
277 self.max(min).min(max)
278 }
279
280 /// Returns the horizontal minimum of `self`.
281 ///
282 /// In other words this computes `min(x, y, ..)`.
283 #[inline]
284 #[must_use]
285 pub fn min_element(self) -> f32 {
286 self.x.min(self.y.min(self.z))
287 }
288
289 /// Returns the horizontal maximum of `self`.
290 ///
291 /// In other words this computes `max(x, y, ..)`.
292 #[inline]
293 #[must_use]
294 pub fn max_element(self) -> f32 {
295 self.x.max(self.y.max(self.z))
296 }
297
298 /// Returns the sum of all elements of `self`.
299 ///
300 /// In other words, this computes `self.x + self.y + ..`.
301 #[inline]
302 #[must_use]
303 pub fn element_sum(self) -> f32 {
304 unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
305 }
306
307 /// Returns the product of all elements of `self`.
308 ///
309 /// In other words, this computes `self.x * self.y * ..`.
310 #[inline]
311 #[must_use]
312 pub fn element_product(self) -> f32 {
313 unsafe {
314 let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
315 vmuls_laneq_f32(s, self.0, 2)
316 }
317 }
318
319 /// Returns a vector mask containing the result of a `==` comparison for each element of
320 /// `self` and `rhs`.
321 ///
322 /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
323 /// elements.
324 #[inline]
325 #[must_use]
326 pub fn cmpeq(self, rhs: Self) -> BVec3A {
327 BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
328 }
329
330 /// Returns a vector mask containing the result of a `!=` comparison for each element of
331 /// `self` and `rhs`.
332 ///
333 /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
334 /// elements.
335 #[inline]
336 #[must_use]
337 pub fn cmpne(self, rhs: Self) -> BVec3A {
338 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
339 }
340
341 /// Returns a vector mask containing the result of a `>=` comparison for each element of
342 /// `self` and `rhs`.
343 ///
344 /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
345 /// elements.
346 #[inline]
347 #[must_use]
348 pub fn cmpge(self, rhs: Self) -> BVec3A {
349 BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
350 }
351
352 /// Returns a vector mask containing the result of a `>` comparison for each element of
353 /// `self` and `rhs`.
354 ///
355 /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
356 /// elements.
357 #[inline]
358 #[must_use]
359 pub fn cmpgt(self, rhs: Self) -> BVec3A {
360 BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
361 }
362
363 /// Returns a vector mask containing the result of a `<=` comparison for each element of
364 /// `self` and `rhs`.
365 ///
366 /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
367 /// elements.
368 #[inline]
369 #[must_use]
370 pub fn cmple(self, rhs: Self) -> BVec3A {
371 BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
372 }
373
374 /// Returns a vector mask containing the result of a `<` comparison for each element of
375 /// `self` and `rhs`.
376 ///
377 /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
378 /// elements.
379 #[inline]
380 #[must_use]
381 pub fn cmplt(self, rhs: Self) -> BVec3A {
382 BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
383 }
384
385 /// Returns a vector containing the absolute value of each element of `self`.
386 #[inline]
387 #[must_use]
388 pub fn abs(self) -> Self {
389 Self(unsafe { vabsq_f32(self.0) })
390 }
391
392 /// Returns a vector with elements representing the sign of `self`.
393 ///
394 /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
395 /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
396 /// - `NAN` if the number is `NAN`
397 #[inline]
398 #[must_use]
399 pub fn signum(self) -> Self {
400 let result = Self(unsafe {
401 vreinterpretq_f32_u32(vorrq_u32(
402 vandq_u32(
403 vreinterpretq_u32_f32(self.0),
404 vreinterpretq_u32_f32(Self::NEG_ONE.0),
405 ),
406 vreinterpretq_u32_f32(Self::ONE.0),
407 ))
408 });
409 let mask = self.is_nan_mask();
410 Self::select(mask, self, result)
411 }
412
413 /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
414 #[inline]
415 #[must_use]
416 pub fn copysign(self, rhs: Self) -> Self {
417 let mask = Self::splat(-0.0);
418 Self(unsafe {
419 vreinterpretq_f32_u32(vorrq_u32(
420 vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
421 vandq_u32(
422 vreinterpretq_u32_f32(self.0),
423 vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
424 ),
425 ))
426 })
427 }
428
429 /// Returns a bitmask with the lowest 3 bits set to the sign bits from the elements of `self`.
430 ///
431 /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes
432 /// into the first lowest bit, element `y` into the second, etc.
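///
/// # Example
///
/// A minimal sketch (editorial addition); negative `x` and `z` set bits 0 and 2:
///
/// ```
/// use glam::Vec3A;
/// assert_eq!(Vec3A::new(-1.0, 2.0, -3.0).is_negative_bitmask(), 0b101);
/// ```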
433 #[inline]
434 #[must_use]
435 pub fn is_negative_bitmask(self) -> u32 {
436 unsafe {
437 let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
438 let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
439 let x = vgetq_lane_u32(m, 0) >> 31;
440 let y = vgetq_lane_u32(m, 1) >> 31;
441 let z = vgetq_lane_u32(m, 2) >> 31;
442
443 x | y << 1 | z << 2
444 }
445 }
446
447 /// Returns `true` if, and only if, all elements are finite. If any element is either
448 /// `NaN`, positive or negative infinity, this will return `false`.
449 #[inline]
450 #[must_use]
451 pub fn is_finite(self) -> bool {
452 self.is_finite_mask().all()
453 }
454
455 /// Performs `is_finite` on each element of self, returning a vector mask of the results.
456 ///
457 /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
458 pub fn is_finite_mask(self) -> BVec3A {
459 BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
460 }
461
462 /// Returns `true` if any elements are `NaN`.
463 #[inline]
464 #[must_use]
465 pub fn is_nan(self) -> bool {
466 self.is_nan_mask().any()
467 }
468
469 /// Performs `is_nan` on each element of self, returning a vector mask of the results.
470 ///
471 /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
472 #[inline]
473 #[must_use]
474 pub fn is_nan_mask(self) -> BVec3A {
475 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
476 }
477
478 /// Computes the length of `self`.
479 #[doc(alias = "magnitude")]
480 #[inline]
481 #[must_use]
482 pub fn length(self) -> f32 {
483 math::sqrt(self.dot(self))
484 }
485
486 /// Computes the squared length of `self`.
487 ///
488 /// This is faster than `length()` as it avoids a square root operation.
489 #[doc(alias = "magnitude2")]
490 #[inline]
491 #[must_use]
492 pub fn length_squared(self) -> f32 {
493 self.dot(self)
494 }
495
496 /// Computes `1.0 / length()`.
497 ///
498 /// For valid results, `self` must _not_ be of length zero.
499 #[inline]
500 #[must_use]
501 pub fn length_recip(self) -> f32 {
502 self.length().recip()
503 }
504
505 /// Computes the Euclidean distance between two points in space.
506 #[inline]
507 #[must_use]
508 pub fn distance(self, rhs: Self) -> f32 {
509 (self - rhs).length()
510 }
511
512 /// Compute the squared euclidean distance between two points in space.
513 #[inline]
514 #[must_use]
515 pub fn distance_squared(self, rhs: Self) -> f32 {
516 (self - rhs).length_squared()
517 }
518
519 /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
///
/// [Euclidean division]: f32::div_euclid
520 #[inline]
521 #[must_use]
522 pub fn div_euclid(self, rhs: Self) -> Self {
523 Self::new(
524 math::div_euclid(self.x, rhs.x),
525 math::div_euclid(self.y, rhs.y),
526 math::div_euclid(self.z, rhs.z),
527 )
528 }
529
530 /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
531 ///
532 /// [Euclidean division]: f32::rem_euclid
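///
/// # Example
///
/// A minimal sketch (editorial addition); unlike `%`, the result is non-negative for a
/// positive `rhs`:
///
/// ```
/// use glam::Vec3A;
/// let v = Vec3A::new(-1.0, 5.0, -7.0).rem_euclid(Vec3A::splat(3.0));
/// assert_eq!(v, Vec3A::new(2.0, 2.0, 2.0));
/// ```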
533 #[inline]
534 #[must_use]
535 pub fn rem_euclid(self, rhs: Self) -> Self {
536 Self::new(
537 math::rem_euclid(self.x, rhs.x),
538 math::rem_euclid(self.y, rhs.y),
539 math::rem_euclid(self.z, rhs.z),
540 )
541 }
542
543 /// Returns `self` normalized to length 1.0.
544 ///
545 /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
546 ///
547 /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
548 ///
549 /// # Panics
550 ///
551 /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
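///
/// # Example
///
/// A minimal sketch (editorial addition); the comparison is tolerance based to stay
/// robust against floating point rounding:
///
/// ```
/// use glam::Vec3A;
/// let n = Vec3A::new(3.0, 0.0, 0.0).normalize();
/// assert!(n.abs_diff_eq(Vec3A::X, 1e-6));
/// assert!(n.is_normalized());
/// ```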
552 #[inline]
553 #[must_use]
554 pub fn normalize(self) -> Self {
555 #[allow(clippy::let_and_return)]
556 let normalized = self.mul(self.length_recip());
557 glam_assert!(normalized.is_finite());
558 normalized
559 }
560
561 /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
562 ///
563 /// In particular, if the input is zero (or very close to zero), or non-finite,
564 /// the result of this operation will be `None`.
565 ///
566 /// See also [`Self::normalize_or_zero()`].
567 #[inline]
568 #[must_use]
569 pub fn try_normalize(self) -> Option<Self> {
570 let rcp = self.length_recip();
571 if rcp.is_finite() && rcp > 0.0 {
572 Some(self * rcp)
573 } else {
574 None
575 }
576 }
577
578 /// Returns `self` normalized to length 1.0 if possible, else returns a
579 /// fallback value.
580 ///
581 /// In particular, if the input is zero (or very close to zero), or non-finite,
582 /// the result of this operation will be the fallback value.
583 ///
584 /// See also [`Self::try_normalize()`].
585 #[inline]
586 #[must_use]
587 pub fn normalize_or(self, fallback: Self) -> Self {
588 let rcp = self.length_recip();
589 if rcp.is_finite() && rcp > 0.0 {
590 self * rcp
591 } else {
592 fallback
593 }
594 }
595
596 /// Returns `self` normalized to length 1.0 if possible, else returns zero.
597 ///
598 /// In particular, if the input is zero (or very close to zero), or non-finite,
599 /// the result of this operation will be zero.
600 ///
601 /// See also [`Self::try_normalize()`].
602 #[inline]
603 #[must_use]
604 pub fn normalize_or_zero(self) -> Self {
605 self.normalize_or(Self::ZERO)
606 }
607
608 /// Returns whether `self` is length `1.0` or not.
609 ///
610 /// Uses a precision threshold of approximately `1e-4`.
611 #[inline]
612 #[must_use]
613 pub fn is_normalized(self) -> bool {
614 math::abs(self.length_squared() - 1.0) <= 2e-4
615 }
616
617 /// Returns the vector projection of `self` onto `rhs`.
618 ///
619 /// `rhs` must be of non-zero length.
620 ///
621 /// # Panics
622 ///
623 /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
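///
/// # Example
///
/// A minimal sketch (editorial addition) projecting onto a (non-unit) X axis direction:
///
/// ```
/// use glam::Vec3A;
/// let p = Vec3A::new(1.0, 2.0, 0.0).project_onto(Vec3A::new(2.0, 0.0, 0.0));
/// assert_eq!(p, Vec3A::X);
/// ```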
624 #[inline]
625 #[must_use]
626 pub fn project_onto(self, rhs: Self) -> Self {
627 let other_len_sq_rcp = rhs.dot(rhs).recip();
628 glam_assert!(other_len_sq_rcp.is_finite());
629 rhs * self.dot(rhs) * other_len_sq_rcp
630 }
631
632 /// Returns the vector rejection of `self` from `rhs`.
633 ///
634 /// The vector rejection is the vector perpendicular to the projection of `self` onto
635 /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
636 ///
637 /// `rhs` must be of non-zero length.
638 ///
639 /// # Panics
640 ///
641 /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
642 #[doc(alias("plane"))]
643 #[inline]
644 #[must_use]
645 pub fn reject_from(self, rhs: Self) -> Self {
646 self - self.project_onto(rhs)
647 }
648
649 /// Returns the vector projection of `self` onto `rhs`.
650 ///
651 /// `rhs` must be normalized.
652 ///
653 /// # Panics
654 ///
655 /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
656 #[inline]
657 #[must_use]
658 pub fn project_onto_normalized(self, rhs: Self) -> Self {
659 glam_assert!(rhs.is_normalized());
660 rhs * self.dot(rhs)
661 }
662
663 /// Returns the vector rejection of `self` from `rhs`.
664 ///
665 /// The vector rejection is the vector perpendicular to the projection of `self` onto
666 /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
667 ///
668 /// `rhs` must be normalized.
669 ///
670 /// # Panics
671 ///
672 /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
673 #[doc(alias("plane"))]
674 #[inline]
675 #[must_use]
676 pub fn reject_from_normalized(self, rhs: Self) -> Self {
677 self - self.project_onto_normalized(rhs)
678 }
679
680 /// Returns a vector containing the nearest integer to a number for each element of `self`.
681 /// Round half-way cases away from 0.0.
682 #[inline]
683 #[must_use]
684 pub fn round(self) -> Self {
685 Self(unsafe { vrndnq_f32(self.0) })
686 }
687
688 /// Returns a vector containing the largest integer less than or equal to a number for each
689 /// element of `self`.
690 #[inline]
691 #[must_use]
692 pub fn floor(self) -> Self {
693 Self(unsafe { vrndmq_f32(self.0) })
694 }
695
696 /// Returns a vector containing the smallest integer greater than or equal to a number for
697 /// each element of `self`.
698 #[inline]
699 #[must_use]
700 pub fn ceil(self) -> Self {
701 Self(unsafe { vrndpq_f32(self.0) })
702 }
703
704 /// Returns a vector containing the integer part of each element of `self`. This means numbers are
705 /// always truncated towards zero.
706 #[inline]
707 #[must_use]
708 pub fn trunc(self) -> Self {
709 Self(unsafe { vrndq_f32(self.0) })
710 }
711
712 /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
713 ///
714 /// Note that this differs from the GLSL implementation of `fract` which returns
715 /// `self - self.floor()`.
716 ///
717 /// Note that this is fast but not precise for large numbers.
718 #[inline]
719 #[must_use]
720 pub fn fract(self) -> Self {
721 self - self.trunc()
722 }
723
724 /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
725 ///
726 /// Note that this differs from the Rust implementation of `fract` which returns
727 /// `self - self.trunc()`.
728 ///
729 /// Note that this is fast but not precise for large numbers.
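///
/// # Example
///
/// A minimal sketch (editorial addition) contrasting [`Self::fract()`] and
/// [`Self::fract_gl()`] for a negative component:
///
/// ```
/// use glam::Vec3A;
/// let v = Vec3A::new(-1.5, 1.25, 2.75);
/// assert_eq!(v.fract(), Vec3A::new(-0.5, 0.25, 0.75));
/// assert_eq!(v.fract_gl(), Vec3A::new(0.5, 0.25, 0.75));
/// ```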
730 #[inline]
731 #[must_use]
732 pub fn fract_gl(self) -> Self {
733 self - self.floor()
734 }
735
736 /// Returns a vector containing `e^self` (the exponential function) for each element of
737 /// `self`.
738 #[inline]
739 #[must_use]
740 pub fn exp(self) -> Self {
741 Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
742 }
743
744 /// Returns a vector containing each element of `self` raised to the power of `n`.
745 #[inline]
746 #[must_use]
747 pub fn powf(self, n: f32) -> Self {
748 Self::new(
749 math::powf(self.x, n),
750 math::powf(self.y, n),
751 math::powf(self.z, n),
752 )
753 }
754
755 /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
756 #[inline]
757 #[must_use]
758 pub fn recip(self) -> Self {
759 Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
760 }
761
762 /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
763 ///
764 /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
765 /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
766 /// extrapolated.
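///
/// # Example
///
/// A minimal sketch (editorial addition); `s = 0.5` lands halfway between the inputs:
///
/// ```
/// use glam::Vec3A;
/// let a = Vec3A::new(1.0, 2.0, 3.0);
/// let b = Vec3A::new(3.0, 4.0, 5.0);
/// assert_eq!(a.lerp(b, 0.5), Vec3A::new(2.0, 3.0, 4.0));
/// ```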
767 #[doc(alias = "mix")]
768 #[inline]
769 #[must_use]
770 pub fn lerp(self, rhs: Self, s: f32) -> Self {
771 self * (1.0 - s) + rhs * s
772 }
773
774 /// Moves towards `rhs` based on the value `d`.
775 ///
776 /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
777 /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
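///
/// # Example
///
/// A minimal sketch (editorial addition); the step is capped at the remaining distance:
///
/// ```
/// use glam::Vec3A;
/// let from = Vec3A::ZERO;
/// let to = Vec3A::new(4.0, 0.0, 0.0);
/// assert_eq!(from.move_towards(to, 1.0), Vec3A::X);
/// assert_eq!(from.move_towards(to, 10.0), to);
/// ```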
778 #[inline]
779 #[must_use]
780 pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
781 let a = rhs - *self;
782 let len = a.length();
783 if len <= d || len <= 1e-4 {
784 return rhs;
785 }
786 *self + a / len * d
787 }
788
789 /// Calculates the midpoint between `self` and `rhs`.
790 ///
791 /// The midpoint is the average of, or halfway point between, two vectors.
792 /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
793 /// while being slightly cheaper to compute.
794 #[inline]
795 pub fn midpoint(self, rhs: Self) -> Self {
796 (self + rhs) * 0.5
797 }
798
799 /// Returns true if the absolute difference of all elements between `self` and `rhs` is
800 /// less than or equal to `max_abs_diff`.
801 ///
802 /// This can be used to compare if two vectors contain similar elements. It works best when
803 /// comparing with a known value. The `max_abs_diff` that should be used depends on
804 /// the values being compared against.
805 ///
806 /// For more see
807 /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
808 #[inline]
809 #[must_use]
810 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
811 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
812 }
813
814 /// Returns a vector with a length no less than `min` and no more than `max`.
815 ///
816 /// # Panics
817 ///
818 /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
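///
/// # Example
///
/// A minimal sketch (editorial addition); a vector of length 6 is scaled down to the
/// `max` length of 3 while keeping its direction:
///
/// ```
/// use glam::Vec3A;
/// let v = Vec3A::new(6.0, 0.0, 0.0).clamp_length(0.0, 3.0);
/// assert_eq!(v, Vec3A::new(3.0, 0.0, 0.0));
/// ```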
819 #[inline]
820 #[must_use]
821 pub fn clamp_length(self, min: f32, max: f32) -> Self {
822 glam_assert!(0.0 <= min);
823 glam_assert!(min <= max);
824 let length_sq = self.length_squared();
825 if length_sq < min * min {
826 min * (self / math::sqrt(length_sq))
827 } else if length_sq > max * max {
828 max * (self / math::sqrt(length_sq))
829 } else {
830 self
831 }
832 }
833
834 /// Returns a vector with a length no more than `max`.
835 ///
836 /// # Panics
837 ///
838 /// Will panic if `max` is negative when `glam_assert` is enabled.
839 #[inline]
840 #[must_use]
841 pub fn clamp_length_max(self, max: f32) -> Self {
842 glam_assert!(0.0 <= max);
843 let length_sq = self.length_squared();
844 if length_sq > max * max {
845 max * (self / math::sqrt(length_sq))
846 } else {
847 self
848 }
849 }
850
851 /// Returns a vector with a length no less than `min`.
852 ///
853 /// # Panics
854 ///
855 /// Will panic if `min` is negative when `glam_assert` is enabled.
856 #[inline]
857 #[must_use]
858 pub fn clamp_length_min(self, min: f32) -> Self {
859 glam_assert!(0.0 <= min);
860 let length_sq = self.length_squared();
861 if length_sq < min * min {
862 min * (self / math::sqrt(length_sq))
863 } else {
864 self
865 }
866 }
867
868 /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
869 /// error, yielding a more accurate result than an unfused multiply-add.
870 ///
871 /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
872 /// architecture has a dedicated fma CPU instruction. However, this is not always true,
873 /// and will be heavily dependant on designing algorithms with specific target hardware in
874 /// mind.
875 #[inline]
876 #[must_use]
877 pub fn mul_add(self, a: Self, b: Self) -> Self {
878 Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
879 }
880
881 /// Returns the reflection vector for a given incident vector `self` and surface normal
882 /// `normal`.
883 ///
884 /// `normal` must be normalized.
885 ///
886 /// # Panics
887 ///
888 /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
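///
/// # Example
///
/// A minimal sketch (editorial addition) bouncing a falling vector off the XZ plane
/// (unit normal `Y`):
///
/// ```
/// use glam::Vec3A;
/// let r = Vec3A::new(1.0, -1.0, 0.0).reflect(Vec3A::Y);
/// assert_eq!(r, Vec3A::new(1.0, 1.0, 0.0));
/// ```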
889 #[inline]
890 #[must_use]
891 pub fn reflect(self, normal: Self) -> Self {
892 glam_assert!(normal.is_normalized());
893 self - 2.0 * self.dot(normal) * normal
894 }
895
896 /// Returns the refraction direction for a given incident vector `self`, surface normal
897 /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
898 /// a zero vector will be returned.
899 ///
900 /// `self` and `normal` must be normalized.
901 ///
902 /// # Panics
903 ///
904 /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
905 #[inline]
906 #[must_use]
907 pub fn refract(self, normal: Self, eta: f32) -> Self {
908 glam_assert!(self.is_normalized());
909 glam_assert!(normal.is_normalized());
910 let n_dot_i = normal.dot(self);
911 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
912 if k >= 0.0 {
913 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
914 } else {
915 Self::ZERO
916 }
917 }
918
919 /// Returns the angle (in radians) between two vectors in the range `[0, +π]`.
920 ///
921 /// The inputs do not need to be unit vectors, however they must be non-zero.
922 #[inline]
923 #[must_use]
924 pub fn angle_between(self, rhs: Self) -> f32 {
925 math::acos_approx(
926 self.dot(rhs)
927 .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
928 )
929 }
930
931 /// Returns some vector that is orthogonal to the given one.
932 ///
933 /// The input vector must be finite and non-zero.
934 ///
935 /// The output vector is not necessarily unit length. For that use
936 /// [`Self::any_orthonormal_vector()`] instead.
937 #[inline]
938 #[must_use]
939 pub fn any_orthogonal_vector(&self) -> Self {
940 // This can probably be optimized
941 if math::abs(self.x) > math::abs(self.y) {
942 Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
943 } else {
944 Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
945 }
946 }
947
948 /// Returns any unit vector that is orthogonal to the given one.
949 ///
950 /// The input vector must be unit length.
951 ///
952 /// # Panics
953 ///
954 /// Will panic if `self` is not normalized when `glam_assert` is enabled.
955 #[inline]
956 #[must_use]
957 pub fn any_orthonormal_vector(&self) -> Self {
958 glam_assert!(self.is_normalized());
959 // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
960 let sign = math::signum(self.z);
961 let a = -1.0 / (sign + self.z);
962 let b = self.x * self.y * a;
963 Self::new(b, sign + self.y * self.y * a, -self.y)
964 }
965
966 /// Given a unit vector return two other vectors that together form an orthonormal
967 /// basis. That is, all three vectors are orthogonal to each other and are normalized.
968 ///
969 /// # Panics
970 ///
971 /// Will panic if `self` is not normalized when `glam_assert` is enabled.
972 #[inline]
973 #[must_use]
974 pub fn any_orthonormal_pair(&self) -> (Self, Self) {
975 glam_assert!(self.is_normalized());
976 // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
977 let sign = math::signum(self.z);
978 let a = -1.0 / (sign + self.z);
979 let b = self.x * self.y * a;
980 (
981 Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
982 Self::new(b, sign + self.y * self.y * a, -self.y),
983 )
984 }
985
986 /// Performs a spherical linear interpolation between `self` and `rhs` based on the value `s`.
987 ///
988 /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
989 /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
990 /// extrapolated.
991 #[inline]
992 #[must_use]
993 pub fn slerp(self, rhs: Self, s: f32) -> Self {
994 let self_length = self.length();
995 let rhs_length = rhs.length();
996 // Cosine of the angle between the vectors [-1, 1], or NaN if either vector has a zero length
997 let dot = self.dot(rhs) / (self_length * rhs_length);
998 // If dot is close to 1 or -1, or is NaN the calculations for t1 and t2 break down
999 if math::abs(dot) < 1.0 - 3e-7 {
1000 // Angle between the vectors [0, +π]
1001 let theta = math::acos_approx(dot);
1002 // Sine of the angle between vectors [0, 1]
1003 let sin_theta = math::sin(theta);
1004 let t1 = math::sin(theta * (1. - s));
1005 let t2 = math::sin(theta * s);
1006
1007 // Interpolate vector lengths
1008 let result_length = self_length.lerp(rhs_length, s);
1009 // Scale the vectors to the target length and interpolate them
1010 return (self * (result_length / self_length) * t1
1011 + rhs * (result_length / rhs_length) * t2)
1012 * sin_theta.recip();
1013 }
1014 if dot < 0.0 {
1015 // Vectors are almost parallel in opposing directions
1016
1017 // Create a rotation from self to rhs along some axis
1018 let axis = self.any_orthogonal_vector().normalize().into();
1019 let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
1020 // Interpolate vector lengths
1021 let result_length = self_length.lerp(rhs_length, s);
1022 rotation * self * (result_length / self_length)
1023 } else {
1024 // Vectors are almost parallel in the same direction, or dot was NaN
1025 self.lerp(rhs, s)
1026 }
1027 }
1028
1029 /// Casts all elements of `self` to `f64`.
1030 #[inline]
1031 #[must_use]
1032 pub fn as_dvec3(&self) -> crate::DVec3 {
1033 crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
1034 }
1035
1036 /// Casts all elements of `self` to `i8`.
1037 #[inline]
1038 #[must_use]
1039 pub fn as_i8vec3(&self) -> crate::I8Vec3 {
1040 crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
1041 }
1042
1043 /// Casts all elements of `self` to `u8`.
1044 #[inline]
1045 #[must_use]
1046 pub fn as_u8vec3(&self) -> crate::U8Vec3 {
1047 crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
1048 }
1049
1050 /// Casts all elements of `self` to `i16`.
1051 #[inline]
1052 #[must_use]
1053 pub fn as_i16vec3(&self) -> crate::I16Vec3 {
1054 crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
1055 }
1056
1057 /// Casts all elements of `self` to `u16`.
1058 #[inline]
1059 #[must_use]
1060 pub fn as_u16vec3(&self) -> crate::U16Vec3 {
1061 crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
1062 }
1063
1064 /// Casts all elements of `self` to `i32`.
1065 #[inline]
1066 #[must_use]
1067 pub fn as_ivec3(&self) -> crate::IVec3 {
1068 crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
1069 }
1070
1071 /// Casts all elements of `self` to `u32`.
1072 #[inline]
1073 #[must_use]
1074 pub fn as_uvec3(&self) -> crate::UVec3 {
1075 crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
1076 }
1077
1078 /// Casts all elements of `self` to `i64`.
1079 #[inline]
1080 #[must_use]
1081 pub fn as_i64vec3(&self) -> crate::I64Vec3 {
1082 crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
1083 }
1084
1085 /// Casts all elements of `self` to `u64`.
1086 #[inline]
1087 #[must_use]
1088 pub fn as_u64vec3(&self) -> crate::U64Vec3 {
1089 crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
1090 }
1091 }
1092
1093 impl Default for Vec3A {
1094 #[inline(always)]
1095 fn default() -> Self {
1096 Self::ZERO
1097 }
1098 }
1099
1100 impl PartialEq for Vec3A {
1101 #[inline]
1102 fn eq(&self, rhs: &Self) -> bool {
1103 self.cmpeq(*rhs).all()
1104 }
1105 }
1106
1107 impl Div<Vec3A> for Vec3A {
1108 type Output = Self;
1109 #[inline]
1110 fn div(self, rhs: Self) -> Self {
1111 Self(unsafe { vdivq_f32(self.0, rhs.0) })
1112 }
1113 }
1114
1115 impl Div<&Vec3A> for Vec3A {
1116 type Output = Vec3A;
1117 #[inline]
1118 fn div(self, rhs: &Vec3A) -> Vec3A {
1119 self.div(*rhs)
1120 }
1121 }
1122
1123 impl Div<&Vec3A> for &Vec3A {
1124 type Output = Vec3A;
1125 #[inline]
1126 fn div(self, rhs: &Vec3A) -> Vec3A {
1127 (*self).div(*rhs)
1128 }
1129 }
1130
1131 impl Div<Vec3A> for &Vec3A {
1132 type Output = Vec3A;
1133 #[inline]
1134 fn div(self, rhs: Vec3A) -> Vec3A {
1135 (*self).div(rhs)
1136 }
1137 }
1138
1139 impl DivAssign<Vec3A> for Vec3A {
1140 #[inline]
1141 fn div_assign(&mut self, rhs: Self) {
1142 self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
1143 }
1144 }
1145
1146 impl DivAssign<&Vec3A> for Vec3A {
1147 #[inline]
1148 fn div_assign(&mut self, rhs: &Vec3A) {
1149 self.div_assign(*rhs)
1150 }
1151 }
1152
1153 impl Div<f32> for Vec3A {
1154 type Output = Self;
1155 #[inline]
1156 fn div(self, rhs: f32) -> Self {
1157 Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
1158 }
1159 }
1160
1161 impl Div<&f32> for Vec3A {
1162 type Output = Vec3A;
1163 #[inline]
1164 fn div(self, rhs: &f32) -> Vec3A {
1165 self.div(*rhs)
1166 }
1167 }
1168
1169 impl Div<&f32> for &Vec3A {
1170 type Output = Vec3A;
1171 #[inline]
1172 fn div(self, rhs: &f32) -> Vec3A {
1173 (*self).div(*rhs)
1174 }
1175 }
1176
1177 impl Div<f32> for &Vec3A {
1178 type Output = Vec3A;
1179 #[inline]
1180 fn div(self, rhs: f32) -> Vec3A {
1181 (*self).div(rhs)
1182 }
1183 }
1184
1185 impl DivAssign<f32> for Vec3A {
1186 #[inline]
1187 fn div_assign(&mut self, rhs: f32) {
1188 self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
1189 }
1190 }
1191
1192 impl DivAssign<&f32> for Vec3A {
1193 #[inline]
1194 fn div_assign(&mut self, rhs: &f32) {
1195 self.div_assign(*rhs)
1196 }
1197 }
1198
1199 impl Div<Vec3A> for f32 {
1200 type Output = Vec3A;
1201 #[inline]
1202 fn div(self, rhs: Vec3A) -> Vec3A {
1203 Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
1204 }
1205 }
1206
1207 impl Div<&Vec3A> for f32 {
1208 type Output = Vec3A;
1209 #[inline]
1210 fn div(self, rhs: &Vec3A) -> Vec3A {
1211 self.div(*rhs)
1212 }
1213 }
1214
1215 impl Div<&Vec3A> for &f32 {
1216 type Output = Vec3A;
1217 #[inline]
1218 fn div(self, rhs: &Vec3A) -> Vec3A {
1219 (*self).div(*rhs)
1220 }
1221 }
1222
1223 impl Div<Vec3A> for &f32 {
1224 type Output = Vec3A;
1225 #[inline]
1226 fn div(self, rhs: Vec3A) -> Vec3A {
1227 (*self).div(rhs)
1228 }
1229 }
1230
1231 impl Mul<Vec3A> for Vec3A {
1232 type Output = Self;
1233 #[inline]
1234 fn mul(self, rhs: Self) -> Self {
1235 Self(unsafe { vmulq_f32(self.0, rhs.0) })
1236 }
1237 }
1238
1239 impl Mul<&Vec3A> for Vec3A {
1240 type Output = Vec3A;
1241 #[inline]
1242 fn mul(self, rhs: &Vec3A) -> Vec3A {
1243 self.mul(*rhs)
1244 }
1245 }
1246
1247 impl Mul<&Vec3A> for &Vec3A {
1248 type Output = Vec3A;
1249 #[inline]
1250 fn mul(self, rhs: &Vec3A) -> Vec3A {
1251 (*self).mul(*rhs)
1252 }
1253 }
1254
1255 impl Mul<Vec3A> for &Vec3A {
1256 type Output = Vec3A;
1257 #[inline]
1258 fn mul(self, rhs: Vec3A) -> Vec3A {
1259 (*self).mul(rhs)
1260 }
1261 }
1262
1263 impl MulAssign<Vec3A> for Vec3A {
1264 #[inline]
1265 fn mul_assign(&mut self, rhs: Self) {
1266 self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
1267 }
1268 }
1269
1270 impl MulAssign<&Vec3A> for Vec3A {
1271 #[inline]
1272 fn mul_assign(&mut self, rhs: &Vec3A) {
1273 self.mul_assign(*rhs)
1274 }
1275 }
1276
1277 impl Mul<f32> for Vec3A {
1278 type Output = Self;
1279 #[inline]
1280 fn mul(self, rhs: f32) -> Self {
1281 Self(unsafe { vmulq_n_f32(self.0, rhs) })
1282 }
1283 }
1284
1285 impl Mul<&f32> for Vec3A {
1286 type Output = Vec3A;
1287 #[inline]
1288 fn mul(self, rhs: &f32) -> Vec3A {
1289 self.mul(*rhs)
1290 }
1291 }
1292
1293 impl Mul<&f32> for &Vec3A {
1294 type Output = Vec3A;
1295 #[inline]
1296 fn mul(self, rhs: &f32) -> Vec3A {
1297 (*self).mul(*rhs)
1298 }
1299 }
1300
1301 impl Mul<f32> for &Vec3A {
1302 type Output = Vec3A;
1303 #[inline]
1304 fn mul(self, rhs: f32) -> Vec3A {
1305 (*self).mul(rhs)
1306 }
1307 }
1308
1309 impl MulAssign<f32> for Vec3A {
1310 #[inline]
1311 fn mul_assign(&mut self, rhs: f32) {
1312 self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
1313 }
1314 }
1315
1316 impl MulAssign<&f32> for Vec3A {
1317 #[inline]
1318 fn mul_assign(&mut self, rhs: &f32) {
1319 self.mul_assign(*rhs)
1320 }
1321 }
1322
1323 impl Mul<Vec3A> for f32 {
1324 type Output = Vec3A;
1325 #[inline]
1326 fn mul(self, rhs: Vec3A) -> Vec3A {
1327 Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
1328 }
1329 }
1330
1331 impl Mul<&Vec3A> for f32 {
1332 type Output = Vec3A;
1333 #[inline]
1334 fn mul(self, rhs: &Vec3A) -> Vec3A {
1335 self.mul(*rhs)
1336 }
1337 }
1338
1339 impl Mul<&Vec3A> for &f32 {
1340 type Output = Vec3A;
1341 #[inline]
1342 fn mul(self, rhs: &Vec3A) -> Vec3A {
1343 (*self).mul(*rhs)
1344 }
1345 }
1346
1347 impl Mul<Vec3A> for &f32 {
1348 type Output = Vec3A;
1349 #[inline]
1350 fn mul(self, rhs: Vec3A) -> Vec3A {
1351 (*self).mul(rhs)
1352 }
1353 }
1354
1355 impl Add<Vec3A> for Vec3A {
1356 type Output = Self;
1357 #[inline]
1358 fn add(self, rhs: Self) -> Self {
1359 Self(unsafe { vaddq_f32(self.0, rhs.0) })
1360 }
1361 }
1362
1363 impl Add<&Vec3A> for Vec3A {
1364 type Output = Vec3A;
1365 #[inline]
1366 fn add(self, rhs: &Vec3A) -> Vec3A {
1367 self.add(*rhs)
1368 }
1369 }
1370
1371 impl Add<&Vec3A> for &Vec3A {
1372 type Output = Vec3A;
1373 #[inline]
1374 fn add(self, rhs: &Vec3A) -> Vec3A {
1375 (*self).add(*rhs)
1376 }
1377 }
1378
1379 impl Add<Vec3A> for &Vec3A {
1380 type Output = Vec3A;
1381 #[inline]
1382 fn add(self, rhs: Vec3A) -> Vec3A {
1383 (*self).add(rhs)
1384 }
1385 }
1386
1387 impl AddAssign<Vec3A> for Vec3A {
1388 #[inline]
1389 fn add_assign(&mut self, rhs: Self) {
1390 self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
1391 }
1392 }
1393
1394 impl AddAssign<&Vec3A> for Vec3A {
1395 #[inline]
1396 fn add_assign(&mut self, rhs: &Vec3A) {
1397 self.add_assign(*rhs)
1398 }
1399 }
1400
1401 impl Add<f32> for Vec3A {
1402 type Output = Self;
1403 #[inline]
1404 fn add(self, rhs: f32) -> Self {
1405 Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
1406 }
1407 }
1408
1409 impl Add<&f32> for Vec3A {
1410 type Output = Vec3A;
1411 #[inline]
1412 fn add(self, rhs: &f32) -> Vec3A {
1413 self.add(*rhs)
1414 }
1415 }
1416
1417 impl Add<&f32> for &Vec3A {
1418 type Output = Vec3A;
1419 #[inline]
1420 fn add(self, rhs: &f32) -> Vec3A {
1421 (*self).add(*rhs)
1422 }
1423 }
1424
1425 impl Add<f32> for &Vec3A {
1426 type Output = Vec3A;
1427 #[inline]
1428 fn add(self, rhs: f32) -> Vec3A {
1429 (*self).add(rhs)
1430 }
1431 }
1432
1433 impl AddAssign<f32> for Vec3A {
1434 #[inline]
1435 fn add_assign(&mut self, rhs: f32) {
1436 self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
1437 }
1438 }
1439
1440 impl AddAssign<&f32> for Vec3A {
1441 #[inline]
1442 fn add_assign(&mut self, rhs: &f32) {
1443 self.add_assign(*rhs)
1444 }
1445 }
1446
1447 impl Add<Vec3A> for f32 {
1448 type Output = Vec3A;
1449 #[inline]
1450 fn add(self, rhs: Vec3A) -> Vec3A {
1451 Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
1452 }
1453 }
1454
1455 impl Add<&Vec3A> for f32 {
1456 type Output = Vec3A;
1457 #[inline]
1458 fn add(self, rhs: &Vec3A) -> Vec3A {
1459 self.add(*rhs)
1460 }
1461 }
1462
1463 impl Add<&Vec3A> for &f32 {
1464 type Output = Vec3A;
1465 #[inline]
1466 fn add(self, rhs: &Vec3A) -> Vec3A {
1467 (*self).add(*rhs)
1468 }
1469 }
1470
1471 impl Add<Vec3A> for &f32 {
1472 type Output = Vec3A;
1473 #[inline]
1474 fn add(self, rhs: Vec3A) -> Vec3A {
1475 (*self).add(rhs)
1476 }
1477 }
1478
1479 impl Sub<Vec3A> for Vec3A {
1480 type Output = Self;
1481 #[inline]
1482 fn sub(self, rhs: Self) -> Self {
1483 Self(unsafe { vsubq_f32(self.0, rhs.0) })
1484 }
1485 }
1486
1487 impl Sub<&Vec3A> for Vec3A {
1488 type Output = Vec3A;
1489 #[inline]
1490 fn sub(self, rhs: &Vec3A) -> Vec3A {
1491 self.sub(*rhs)
1492 }
1493 }
1494
1495 impl Sub<&Vec3A> for &Vec3A {
1496 type Output = Vec3A;
1497 #[inline]
1498 fn sub(self, rhs: &Vec3A) -> Vec3A {
1499 (*self).sub(*rhs)
1500 }
1501 }
1502
1503 impl Sub<Vec3A> for &Vec3A {
1504 type Output = Vec3A;
1505 #[inline]
1506 fn sub(self, rhs: Vec3A) -> Vec3A {
1507 (*self).sub(rhs)
1508 }
1509 }
1510
1511 impl SubAssign<Vec3A> for Vec3A {
1512 #[inline]
1513 fn sub_assign(&mut self, rhs: Vec3A) {
1514 self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
1515 }
1516 }
1517
1518 impl SubAssign<&Vec3A> for Vec3A {
1519 #[inline]
1520 fn sub_assign(&mut self, rhs: &Vec3A) {
1521 self.sub_assign(*rhs)
1522 }
1523 }
1524
1525 impl Sub<f32> for Vec3A {
1526 type Output = Self;
1527 #[inline]
1528 fn sub(self, rhs: f32) -> Self {
1529 Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
1530 }
1531 }
1532
1533 impl Sub<&f32> for Vec3A {
1534 type Output = Vec3A;
1535 #[inline]
1536 fn sub(self, rhs: &f32) -> Vec3A {
1537 self.sub(*rhs)
1538 }
1539 }
1540
1541 impl Sub<&f32> for &Vec3A {
1542 type Output = Vec3A;
1543 #[inline]
1544 fn sub(self, rhs: &f32) -> Vec3A {
1545 (*self).sub(*rhs)
1546 }
1547 }
1548
1549 impl Sub<f32> for &Vec3A {
1550 type Output = Vec3A;
1551 #[inline]
1552 fn sub(self, rhs: f32) -> Vec3A {
1553 (*self).sub(rhs)
1554 }
1555 }
1556
1557 impl SubAssign<f32> for Vec3A {
1558 #[inline]
1559 fn sub_assign(&mut self, rhs: f32) {
1560 self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
1561 }
1562 }
1563
1564 impl SubAssign<&f32> for Vec3A {
1565 #[inline]
1566 fn sub_assign(&mut self, rhs: &f32) {
1567 self.sub_assign(*rhs)
1568 }
1569 }
1570
1571 impl Sub<Vec3A> for f32 {
1572 type Output = Vec3A;
1573 #[inline]
1574 fn sub(self, rhs: Vec3A) -> Vec3A {
1575 Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
1576 }
1577 }
1578
1579 impl Sub<&Vec3A> for f32 {
1580 type Output = Vec3A;
1581 #[inline]
1582 fn sub(self, rhs: &Vec3A) -> Vec3A {
1583 self.sub(*rhs)
1584 }
1585 }
1586
1587 impl Sub<&Vec3A> for &f32 {
1588 type Output = Vec3A;
1589 #[inline]
1590 fn sub(self, rhs: &Vec3A) -> Vec3A {
1591 (*self).sub(*rhs)
1592 }
1593 }
1594
1595 impl Sub<Vec3A> for &f32 {
1596 type Output = Vec3A;
1597 #[inline]
1598 fn sub(self, rhs: Vec3A) -> Vec3A {
1599 (*self).sub(rhs)
1600 }
1601 }
1602
1603 impl Rem<Vec3A> for Vec3A {
1604 type Output = Self;
1605 #[inline]
1606 fn rem(self, rhs: Self) -> Self {
1607 unsafe {
1608 let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
1609 Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
1610 }
1611 }
1612 }
1613
1614 impl Rem<&Vec3A> for Vec3A {
1615 type Output = Vec3A;
1616 #[inline]
1617 fn rem(self, rhs: &Vec3A) -> Vec3A {
1618 self.rem(*rhs)
1619 }
1620 }
1621
1622 impl Rem<&Vec3A> for &Vec3A {
1623 type Output = Vec3A;
1624 #[inline]
1625 fn rem(self, rhs: &Vec3A) -> Vec3A {
1626 (*self).rem(*rhs)
1627 }
1628 }
1629
1630 impl Rem<Vec3A> for &Vec3A {
1631 type Output = Vec3A;
1632 #[inline]
1633 fn rem(self, rhs: Vec3A) -> Vec3A {
1634 (*self).rem(rhs)
1635 }
1636 }
1637
1638 impl RemAssign<Vec3A> for Vec3A {
1639 #[inline]
1640 fn rem_assign(&mut self, rhs: Self) {
1641 *self = self.rem(rhs);
1642 }
1643 }
1644
1645 impl RemAssign<&Vec3A> for Vec3A {
1646 #[inline]
1647 fn rem_assign(&mut self, rhs: &Vec3A) {
1648 self.rem_assign(*rhs)
1649 }
1650 }
1651
1652 impl Rem<f32> for Vec3A {
1653 type Output = Self;
1654 #[inline]
1655 fn rem(self, rhs: f32) -> Self {
1656 self.rem(Self::splat(rhs))
1657 }
1658 }
1659
1660 impl Rem<&f32> for Vec3A {
1661 type Output = Vec3A;
1662 #[inline]
1663 fn rem(self, rhs: &f32) -> Vec3A {
1664 self.rem(*rhs)
1665 }
1666 }
1667
1668 impl Rem<&f32> for &Vec3A {
1669 type Output = Vec3A;
1670 #[inline]
1671 fn rem(self, rhs: &f32) -> Vec3A {
1672 (*self).rem(*rhs)
1673 }
1674 }
1675
1676 impl Rem<f32> for &Vec3A {
1677 type Output = Vec3A;
1678 #[inline]
1679 fn rem(self, rhs: f32) -> Vec3A {
1680 (*self).rem(rhs)
1681 }
1682 }
1683
1684 impl RemAssign<f32> for Vec3A {
1685 #[inline]
1686 fn rem_assign(&mut self, rhs: f32) {
1687 *self = self.rem(Self::splat(rhs));
1688 }
1689 }
1690
1691 impl RemAssign<&f32> for Vec3A {
1692 #[inline]
1693 fn rem_assign(&mut self, rhs: &f32) {
1694 self.rem_assign(*rhs)
1695 }
1696 }
1697
1698 impl Rem<Vec3A> for f32 {
1699 type Output = Vec3A;
1700 #[inline]
1701 fn rem(self, rhs: Vec3A) -> Vec3A {
1702 Vec3A::splat(self).rem(rhs)
1703 }
1704 }
1705
1706 impl Rem<&Vec3A> for f32 {
1707 type Output = Vec3A;
1708 #[inline]
1709 fn rem(self, rhs: &Vec3A) -> Vec3A {
1710 self.rem(*rhs)
1711 }
1712 }
1713
1714 impl Rem<&Vec3A> for &f32 {
1715 type Output = Vec3A;
1716 #[inline]
1717 fn rem(self, rhs: &Vec3A) -> Vec3A {
1718 (*self).rem(*rhs)
1719 }
1720 }
1721
1722 impl Rem<Vec3A> for &f32 {
1723 type Output = Vec3A;
1724 #[inline]
1725 fn rem(self, rhs: Vec3A) -> Vec3A {
1726 (*self).rem(rhs)
1727 }
1728 }
1729
1730 #[cfg(not(target_arch = "spirv"))]
1731 impl AsRef<[f32; 3]> for Vec3A {
1732 #[inline]
1733 fn as_ref(&self) -> &[f32; 3] {
1734 unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
1735 }
1736 }
1737
1738 #[cfg(not(target_arch = "spirv"))]
1739 impl AsMut<[f32; 3]> for Vec3A {
1740 #[inline]
1741 fn as_mut(&mut self) -> &mut [f32; 3] {
1742 unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
1743 }
1744 }
1745
1746 impl Sum for Vec3A {
1747 #[inline]
1748 fn sum<I>(iter: I) -> Self
1749 where
1750 I: Iterator<Item = Self>,
1751 {
1752 iter.fold(Self::ZERO, Self::add)
1753 }
1754 }
1755
1756 impl<'a> Sum<&'a Self> for Vec3A {
1757 #[inline]
1758 fn sum<I>(iter: I) -> Self
1759 where
1760 I: Iterator<Item = &'a Self>,
1761 {
1762 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1763 }
1764 }
1765
1766 impl Product for Vec3A {
1767 #[inline]
1768 fn product<I>(iter: I) -> Self
1769 where
1770 I: Iterator<Item = Self>,
1771 {
1772 iter.fold(Self::ONE, Self::mul)
1773 }
1774 }
1775
1776 impl<'a> Product<&'a Self> for Vec3A {
1777 #[inline]
1778 fn product<I>(iter: I) -> Self
1779 where
1780 I: Iterator<Item = &'a Self>,
1781 {
1782 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1783 }
1784 }
1785
1786 impl Neg for Vec3A {
1787 type Output = Self;
1788 #[inline]
1789 fn neg(self) -> Self {
1790 Self(unsafe { vnegq_f32(self.0) })
1791 }
1792 }
1793
1794 impl Neg for &Vec3A {
1795 type Output = Vec3A;
1796 #[inline]
1797 fn neg(self) -> Vec3A {
1798 (*self).neg()
1799 }
1800 }
1801
1802 impl Index<usize> for Vec3A {
1803 type Output = f32;
1804 #[inline]
1805 fn index(&self, index: usize) -> &Self::Output {
1806 match index {
1807 0 => &self.x,
1808 1 => &self.y,
1809 2 => &self.z,
1810 _ => panic!("index out of bounds"),
1811 }
1812 }
1813 }
1814
1815 impl IndexMut<usize> for Vec3A {
1816 #[inline]
1817 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1818 match index {
1819 0 => &mut self.x,
1820 1 => &mut self.y,
1821 2 => &mut self.z,
1822 _ => panic!("index out of bounds"),
1823 }
1824 }
1825 }
1826
1827 impl fmt::Display for Vec3A {
1828 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1829 if let Some(p) = f.precision() {
1830 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
1831 } else {
1832 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
1833 }
1834 }
1835 }
1836
1837 impl fmt::Debug for Vec3A {
1838 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1839 fmt.debug_tuple(stringify!(Vec3A))
1840 .field(&self.x)
1841 .field(&self.y)
1842 .field(&self.z)
1843 .finish()
1844 }
1845 }
1846
1847 impl From<Vec3A> for float32x4_t {
1848 #[inline(always)]
1849 fn from(t: Vec3A) -> Self {
1850 t.0
1851 }
1852 }
1853
1854 impl From<float32x4_t> for Vec3A {
1855 #[inline(always)]
1856 fn from(t: float32x4_t) -> Self {
1857 Self(t)
1858 }
1859 }
1860
1861 impl From<[f32; 3]> for Vec3A {
1862 #[inline]
1863 fn from(a: [f32; 3]) -> Self {
1864 Self::new(a[0], a[1], a[2])
1865 }
1866 }
1867
1868 impl From<Vec3A> for [f32; 3] {
1869 #[inline]
1870 fn from(v: Vec3A) -> Self {
1871 use crate::align16::Align16;
1872 use core::mem::MaybeUninit;
1873 let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
1874 unsafe {
1875 vst1q_f32(out.as_mut_ptr().cast(), v.0);
1876 out.assume_init().0
1877 }
1878 }
1879 }
1880
1881 impl From<(f32, f32, f32)> for Vec3A {
1882 #[inline]
1883 fn from(t: (f32, f32, f32)) -> Self {
1884 Self::new(t.0, t.1, t.2)
1885 }
1886 }
1887
1888 impl From<Vec3A> for (f32, f32, f32) {
1889 #[inline]
1890 fn from(v: Vec3A) -> Self {
1891 use crate::align16::Align16;
1892 use core::mem::MaybeUninit;
1893 let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
1894 unsafe {
1895 vst1q_f32(out.as_mut_ptr().cast(), v.0);
1896 out.assume_init().0
1897 }
1898 }
1899 }
1900
1901 impl From<Vec3> for Vec3A {
1902 #[inline]
1903 fn from(v: Vec3) -> Self {
1904 Self::new(v.x, v.y, v.z)
1905 }
1906 }
1907
1908 impl From<Vec3A> for Vec3 {
1909 #[inline]
1910 fn from(v: Vec3A) -> Self {
1911 use crate::align16::Align16;
1912 use core::mem::MaybeUninit;
1913 let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
1914 unsafe {
1915 vst1q_f32(out.as_mut_ptr().cast(), v.0);
1916 out.assume_init().0
1917 }
1918 }
1919 }
1920
1921 impl From<(Vec2, f32)> for Vec3A {
1922 #[inline]
1923 fn from((v, z): (Vec2, f32)) -> Self {
1924 Self::new(v.x, v.y, z)
1925 }
1926 }
1927
1928 impl Deref for Vec3A {
1929 type Target = crate::deref::Vec3<f32>;
1930 #[inline]
1931 fn deref(&self) -> &Self::Target {
1932 unsafe { &*(self as *const Self).cast() }
1933 }
1934 }
1935
1936 impl DerefMut for Vec3A {
1937 #[inline]
1938 fn deref_mut(&mut self) -> &mut Self::Target {
1939 unsafe { &mut *(self as *mut Self).cast() }
1940 }
1941 }
1942
1943 impl From<BVec3> for Vec3A {
1944 #[inline]
1945 fn from(v: BVec3) -> Self {
1946 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
1947 }
1948 }
1949
1950 impl From<BVec3A> for Vec3A {
1951 #[inline]
1952 fn from(v: BVec3A) -> Self {
1953 let bool_array: [bool; 3] = v.into();
1954 Self::new(
1955 f32::from(bool_array[0]),
1956 f32::from(bool_array[1]),
1957 f32::from(bool_array[2]),
1958 )
1959 }
1960 }
1961