// Generated from vec.rs.tera template. Edit the template, not the generated file.

use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};

use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

use core::arch::aarch64::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

/// Creates a 4-dimensional vector.
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);

impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `mask`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
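    ///
    /// # Example
    ///
    /// A small illustrative sketch (assuming `Vec4` and `BVec4A` are exported from the
    /// crate root, as in the published `glam` API):
    ///
    /// ```
    /// # use glam::{BVec4A, Vec4};
    /// let mask = BVec4A::new(true, false, true, false);
    /// let v = Vec4::select(mask, Vec4::ONE, Vec4::ZERO);
    /// assert_eq!(v, Vec4::new(1.0, 0.0, 1.0, 0.0));
    /// ```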
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        unsafe {
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }

    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    /// Creates a 4D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
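    ///
    /// # Example
    ///
    /// A quick numeric sketch (assuming the crate root exports `Vec4`, as `glam` does):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// let b = Vec4::new(5.0, 6.0, 7.0, 8.0);
    /// // 1*5 + 2*6 + 3*7 + 4*8 = 70
    /// assert_eq!(a.dot(b), 70.0);
    /// ```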
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
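    ///
    /// # Example
    ///
    /// An illustrative sketch of the per-component behavior (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(-1.0, 0.5, 2.0, 5.0);
    /// assert_eq!(v.clamp(Vec4::ZERO, Vec4::ONE), Vec4::new(0.0, 0.5, 1.0, 1.0));
    /// ```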
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe { vminnmvq_f32(self.0) }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe { vmaxnmvq_f32(self.0) }
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe { vaddvq_f32(self.0) }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
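    ///
    /// # Example
    ///
    /// A sketch of the rules above, including the signed-zero cases (assuming
    /// `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(3.5, -0.0, 0.0, f32::NEG_INFINITY);
    /// assert_eq!(v.signum(), Vec4::new(1.0, -1.0, 1.0, -1.0));
    /// ```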
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
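    ///
    /// # Example
    ///
    /// A bit-layout sketch (assuming `glam::Vec4`); note that `-0.0` counts as negative
    /// because only the sign bit is inspected:
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(-1.0, 2.0, -0.0, -3.0);
    /// assert_eq!(v.is_negative_bitmask(), 0b1101);
    /// ```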
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;
            let w = vgetq_lane_u32(m, 3) >> 31;
            x | y << 1 | z << 2 | w << 3
        }
    }

    /// Returns `true` if, and only if, all elements are finite. If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].
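    ///
    /// # Example
    ///
    /// A minimal sketch of both outcomes (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// assert_eq!((Vec4::X * 2.0).try_normalize(), Some(Vec4::X));
    /// assert_eq!(Vec4::ZERO.try_normalize(), None);
    /// ```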
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns a
    /// fallback value.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be the fallback value.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
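    ///
    /// # Example
    ///
    /// A sketch relating rejection to projection (assuming `glam::Vec4`); together the
    /// two results decompose `self` into parts parallel and perpendicular to `rhs`:
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(1.0, 1.0, 0.0, 0.0);
    /// assert_eq!(v.project_onto(Vec4::X), Vec4::new(1.0, 0.0, 0.0, 0.0));
    /// assert_eq!(v.reject_from(Vec4::X), Vec4::new(0.0, 1.0, 0.0, 0.0));
    /// ```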
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means
    /// numbers are always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
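    ///
    /// # Example
    ///
    /// A sketch contrasting the two conventions on a negative input (assuming
    /// `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::splat(-1.25);
    /// assert_eq!(v.fract(), Vec4::splat(-0.25)); // self - self.trunc()
    /// assert_eq!(v.fract_gl(), Vec4::splat(0.75)); // self - self.floor()
    /// ```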
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
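    ///
    /// # Example
    ///
    /// A numeric sketch, including an extrapolating `s` (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::ZERO;
    /// let b = Vec4::splat(4.0);
    /// assert_eq!(a.lerp(b, 0.25), Vec4::splat(1.0));
    /// assert_eq!(a.lerp(b, 1.5), Vec4::splat(6.0));
    /// ```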
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.
    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
    /// while being slightly cheaper to compute.
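    ///
    /// # Example
    ///
    /// A quick sketch (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::ZERO;
    /// let b = Vec4::new(2.0, 4.0, 6.0, 8.0);
    /// assert_eq!(a.midpoint(b), Vec4::new(1.0, 2.0, 3.0, 4.0));
    /// ```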
    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
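    ///
    /// # Example
    ///
    /// A tolerance sketch (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::splat(1.0);
    /// let b = Vec4::new(1.0, 1.0, 1.0, 1.05);
    /// assert!(a.abs_diff_eq(b, 0.1));
    /// assert!(!a.abs_diff_eq(b, 0.01));
    /// ```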
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware in
    /// mind.
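    ///
    /// # Example
    ///
    /// A minimal numeric sketch (assuming `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::splat(2.0);
    /// // (2 * 3) + 1 = 7 in every lane
    /// assert_eq!(v.mul_add(Vec4::splat(3.0), Vec4::ONE), Vec4::splat(7.0));
    /// ```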
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

    /// Returns the reflection vector for a given incident vector `self` and surface normal
    /// `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
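    ///
    /// # Example
    ///
    /// A sketch bouncing a ray off the plane with unit normal `Y` (assuming
    /// `glam::Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let incident = Vec4::new(1.0, -1.0, 0.0, 0.0);
    /// assert_eq!(incident.reflect(Vec4::Y), Vec4::new(1.0, 1.0, 0.0, 0.0));
    /// ```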
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
    /// a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
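    ///
    /// # Example
    ///
    /// A degenerate-but-exact sketch (assuming `glam::Vec4`): with `eta == 1.0` the
    /// ray passes through unbent:
    ///
    /// ```
    /// # use glam::Vec4;
    /// let incident = -Vec4::Y;
    /// assert_eq!(incident.refract(Vec4::Y, 1.0), -Vec4::Y);
    /// ```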
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Vec4) {
        self.div_assign(*rhs)
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs)
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Vec4) {
        self.mul_assign(*rhs)
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs)
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Vec4) {
        self.add_assign(*rhs)
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs)
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Vec4) {
        self.sub_assign(*rhs)
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs)
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Vec4) {
        self.rem_assign(*rhs)
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs)
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec4 {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}