1 // Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3 use crate::{f32::math, wasm32::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5 use core::fmt;
6 use core::iter::{Product, Sum};
7 use core::{f32, ops::*};
8
9 use core::arch::wasm32::*;
10
/// Creates a 4-dimensional vector.
///
/// Shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
17
18 /// A 4-dimensional vector.
19 ///
20 /// SIMD vector types are used for storage on supported platforms.
21 ///
22 /// This type is 16 byte aligned.
23 #[derive(Clone, Copy)]
24 #[repr(transparent)]
25 pub struct Vec4(pub(crate) v128);
26
27 impl Vec4 {
28 /// All zeroes.
29 pub const ZERO: Self = Self::splat(0.0);
30
31 /// All ones.
32 pub const ONE: Self = Self::splat(1.0);
33
34 /// All negative ones.
35 pub const NEG_ONE: Self = Self::splat(-1.0);
36
37 /// All `f32::MIN`.
38 pub const MIN: Self = Self::splat(f32::MIN);
39
40 /// All `f32::MAX`.
41 pub const MAX: Self = Self::splat(f32::MAX);
42
43 /// All `f32::NAN`.
44 pub const NAN: Self = Self::splat(f32::NAN);
45
46 /// All `f32::INFINITY`.
47 pub const INFINITY: Self = Self::splat(f32::INFINITY);
48
49 /// All `f32::NEG_INFINITY`.
50 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
51
52 /// A unit vector pointing along the positive X axis.
53 pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);
54
55 /// A unit vector pointing along the positive Y axis.
56 pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);
57
58 /// A unit vector pointing along the positive Z axis.
59 pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);
60
61 /// A unit vector pointing along the positive W axis.
62 pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);
63
64 /// A unit vector pointing along the negative X axis.
65 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);
66
67 /// A unit vector pointing along the negative Y axis.
68 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);
69
70 /// A unit vector pointing along the negative Z axis.
71 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);
72
73 /// A unit vector pointing along the negative W axis.
74 pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);
75
76 /// The unit axes.
77 pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
78
79 /// Creates a new vector.
80 #[inline(always)]
81 #[must_use]
new(x: f32, y: f32, z: f32, w: f32) -> Self82 pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
83 Self(f32x4(x, y, z, w))
84 }
85
86 /// Creates a vector with all elements set to `v`.
87 #[inline]
88 #[must_use]
splat(v: f32) -> Self89 pub const fn splat(v: f32) -> Self {
90 Self(f32x4(v, v, v, v))
91 }
92
93 /// Returns a vector containing each element of `self` modified by a mapping function `f`.
94 #[inline]
95 #[must_use]
map<F>(self, f: F) -> Self where F: Fn(f32) -> f32,96 pub fn map<F>(self, f: F) -> Self
97 where
98 F: Fn(f32) -> f32,
99 {
100 Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
101 }
102
103 /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
104 /// for each element of `self`.
105 ///
106 /// A true element in the mask uses the corresponding element from `if_true`, and false
107 /// uses the element from `if_false`.
108 #[inline]
109 #[must_use]
select(mask: BVec4A, if_true: Self, if_false: Self) -> Self110 pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
111 Self(v128_bitselect(if_true.0, if_false.0, mask.0))
112 }
113
114 /// Creates a new vector from an array.
115 #[inline]
116 #[must_use]
from_array(a: [f32; 4]) -> Self117 pub const fn from_array(a: [f32; 4]) -> Self {
118 Self::new(a[0], a[1], a[2], a[3])
119 }
120
    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over a `v128`, which has the
        // same size (16 bytes) as `[f32; 4]` and at least its alignment, so the
        // vector's storage can be read as a float array.
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }
127
128 /// Creates a vector from the first 4 values in `slice`.
129 ///
130 /// # Panics
131 ///
132 /// Panics if `slice` is less than 4 elements long.
133 #[inline]
134 #[must_use]
from_slice(slice: &[f32]) -> Self135 pub const fn from_slice(slice: &[f32]) -> Self {
136 assert!(slice.len() >= 4);
137 Self::new(slice[0], slice[1], slice[2], slice[3])
138 }
139
140 /// Writes the elements of `self` to the first 4 elements in `slice`.
141 ///
142 /// # Panics
143 ///
144 /// Panics if `slice` is less than 4 elements long.
145 #[inline]
write_to_slice(self, slice: &mut [f32])146 pub fn write_to_slice(self, slice: &mut [f32]) {
147 slice[..4].copy_from_slice(&self.to_array());
148 }
149
150 /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
151 ///
152 /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
153 ///
154 /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
155 #[inline]
156 #[must_use]
truncate(self) -> Vec3157 pub fn truncate(self) -> Vec3 {
158 use crate::swizzles::Vec4Swizzles;
159 self.xyz()
160 }
161
162 /// Creates a 4D vector from `self` with the given value of `x`.
163 #[inline]
164 #[must_use]
with_x(mut self, x: f32) -> Self165 pub fn with_x(mut self, x: f32) -> Self {
166 self.x = x;
167 self
168 }
169
170 /// Creates a 4D vector from `self` with the given value of `y`.
171 #[inline]
172 #[must_use]
with_y(mut self, y: f32) -> Self173 pub fn with_y(mut self, y: f32) -> Self {
174 self.y = y;
175 self
176 }
177
178 /// Creates a 4D vector from `self` with the given value of `z`.
179 #[inline]
180 #[must_use]
with_z(mut self, z: f32) -> Self181 pub fn with_z(mut self, z: f32) -> Self {
182 self.z = z;
183 self
184 }
185
186 /// Creates a 4D vector from `self` with the given value of `w`.
187 #[inline]
188 #[must_use]
with_w(mut self, w: f32) -> Self189 pub fn with_w(mut self, w: f32) -> Self {
190 self.w = w;
191 self
192 }
193
194 /// Computes the dot product of `self` and `rhs`.
195 #[inline]
196 #[must_use]
dot(self, rhs: Self) -> f32197 pub fn dot(self, rhs: Self) -> f32 {
198 dot4(self.0, rhs.0)
199 }
200
201 /// Returns a vector where every component is the dot product of `self` and `rhs`.
202 #[inline]
203 #[must_use]
dot_into_vec(self, rhs: Self) -> Self204 pub fn dot_into_vec(self, rhs: Self) -> Self {
205 Self(dot4_into_v128(self.0, rhs.0))
206 }
207
208 /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
209 ///
210 /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
211 #[inline]
212 #[must_use]
min(self, rhs: Self) -> Self213 pub fn min(self, rhs: Self) -> Self {
214 Self(f32x4_pmin(self.0, rhs.0))
215 }
216
217 /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
218 ///
219 /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
220 #[inline]
221 #[must_use]
max(self, rhs: Self) -> Self222 pub fn max(self, rhs: Self) -> Self {
223 Self(f32x4_pmax(self.0, rhs.0))
224 }
225
226 /// Component-wise clamping of values, similar to [`f32::clamp`].
227 ///
228 /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
229 ///
230 /// # Panics
231 ///
232 /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
233 #[inline]
234 #[must_use]
clamp(self, min: Self, max: Self) -> Self235 pub fn clamp(self, min: Self, max: Self) -> Self {
236 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
237 self.max(min).min(max)
238 }
239
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        let v = self.0;
        // Tree reduction: min with [z, w, _, _] gives [min(x,z), min(y,w), _, _],
        // then min with its own lane 1 leaves min(x, y, z, w) in lane 0.
        // Note `pmin` is the wasm pseudo-minimum (`b < a ? b : a`), so its NaN
        // handling differs from `f32::min`.
        let v = f32x4_pmin(v, i32x4_shuffle::<2, 3, 0, 0>(v, v));
        let v = f32x4_pmin(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }
251
    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        let v = self.0;
        // Tree reduction: max with [z, w, _, _] gives [max(x,z), max(y,w), _, _],
        // then max with its own lane 1 leaves max(x, y, z, w) in lane 0.
        // Note `pmax` is the wasm pseudo-maximum (`a < b ? b : a`), so its NaN
        // handling differs from `f32::max`.
        let v = f32x4_pmax(v, i32x4_shuffle::<2, 3, 0, 0>(v, v));
        let v = f32x4_pmax(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }
263
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        let v = self.0;
        // Shuffle [y, x, w, x] and add, giving [x+y, _, z+w, _]; then bring
        // lane 2 down to lane 0 and add again, leaving x+y+z+w in lane 0.
        let v = f32x4_add(v, i32x4_shuffle::<1, 0, 3, 0>(v, v));
        let v = f32x4_add(v, i32x4_shuffle::<2, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }
275
    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        let v = self.0;
        // Shuffle [y, x, w, x] and multiply, giving [x*y, _, z*w, _]; then bring
        // lane 2 down to lane 0 and multiply again, leaving x*y*z*w in lane 0.
        let v = f32x4_mul(v, i32x4_shuffle::<1, 0, 3, 0>(v, v));
        let v = f32x4_mul(v, i32x4_shuffle::<2, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }
287
288 /// Returns a vector mask containing the result of a `==` comparison for each element of
289 /// `self` and `rhs`.
290 ///
291 /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
292 /// elements.
293 #[inline]
294 #[must_use]
cmpeq(self, rhs: Self) -> BVec4A295 pub fn cmpeq(self, rhs: Self) -> BVec4A {
296 BVec4A(f32x4_eq(self.0, rhs.0))
297 }
298
299 /// Returns a vector mask containing the result of a `!=` comparison for each element of
300 /// `self` and `rhs`.
301 ///
302 /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
303 /// elements.
304 #[inline]
305 #[must_use]
cmpne(self, rhs: Self) -> BVec4A306 pub fn cmpne(self, rhs: Self) -> BVec4A {
307 BVec4A(f32x4_ne(self.0, rhs.0))
308 }
309
310 /// Returns a vector mask containing the result of a `>=` comparison for each element of
311 /// `self` and `rhs`.
312 ///
313 /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
314 /// elements.
315 #[inline]
316 #[must_use]
cmpge(self, rhs: Self) -> BVec4A317 pub fn cmpge(self, rhs: Self) -> BVec4A {
318 BVec4A(f32x4_ge(self.0, rhs.0))
319 }
320
321 /// Returns a vector mask containing the result of a `>` comparison for each element of
322 /// `self` and `rhs`.
323 ///
324 /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
325 /// elements.
326 #[inline]
327 #[must_use]
cmpgt(self, rhs: Self) -> BVec4A328 pub fn cmpgt(self, rhs: Self) -> BVec4A {
329 BVec4A(f32x4_gt(self.0, rhs.0))
330 }
331
332 /// Returns a vector mask containing the result of a `<=` comparison for each element of
333 /// `self` and `rhs`.
334 ///
335 /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
336 /// elements.
337 #[inline]
338 #[must_use]
cmple(self, rhs: Self) -> BVec4A339 pub fn cmple(self, rhs: Self) -> BVec4A {
340 BVec4A(f32x4_le(self.0, rhs.0))
341 }
342
343 /// Returns a vector mask containing the result of a `<` comparison for each element of
344 /// `self` and `rhs`.
345 ///
346 /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
347 /// elements.
348 #[inline]
349 #[must_use]
cmplt(self, rhs: Self) -> BVec4A350 pub fn cmplt(self, rhs: Self) -> BVec4A {
351 BVec4A(f32x4_lt(self.0, rhs.0))
352 }
353
354 /// Returns a vector containing the absolute value of each element of `self`.
355 #[inline]
356 #[must_use]
abs(self) -> Self357 pub fn abs(self) -> Self {
358 Self(f32x4_abs(self.0))
359 }
360
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: `(self & bits(-1.0)) | bits(1.0)` keeps each lane's sign
        // bit and forces the magnitude to exactly 1.0, producing +/-1.0 per
        // lane. NaN lanes are then restored from `self` via the select.
        let result = Self(v128_or(v128_and(self.0, Self::NEG_ONE.0), Self::ONE.0));
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
373
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // `-0.0` has only the sign bit set, so it acts as a sign-bit mask:
        // take the sign bits from `rhs` and the remaining (magnitude) bits
        // from `self`.
        let mask = Self::splat(-0.0);
        Self(v128_or(
            v128_and(rhs.0, mask.0),
            v128_andnot(self.0, mask.0),
        ))
    }
384
385 /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
386 ///
387 /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes
388 /// into the first lowest bit, element `y` into the second, etc.
389 #[inline]
390 #[must_use]
is_negative_bitmask(self) -> u32391 pub fn is_negative_bitmask(self) -> u32 {
392 u32x4_bitmask(self.0) as u32
393 }
394
395 /// Returns `true` if, and only if, all elements are finite. If any element is either
396 /// `NaN`, positive or negative infinity, this will return `false`.
397 #[inline]
398 #[must_use]
is_finite(self) -> bool399 pub fn is_finite(self) -> bool {
400 self.is_finite_mask().all()
401 }
402
403 /// Performs `is_finite` on each element of self, returning a vector mask of the results.
404 ///
405 /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
is_finite_mask(self) -> BVec4A406 pub fn is_finite_mask(self) -> BVec4A {
407 BVec4A(f32x4_lt(f32x4_abs(self.0), Self::INFINITY.0))
408 }
409
410 /// Returns `true` if any elements are `NaN`.
411 #[inline]
412 #[must_use]
is_nan(self) -> bool413 pub fn is_nan(self) -> bool {
414 self.is_nan_mask().any()
415 }
416
417 /// Performs `is_nan` on each element of self, returning a vector mask of the results.
418 ///
419 /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
420 #[inline]
421 #[must_use]
is_nan_mask(self) -> BVec4A422 pub fn is_nan_mask(self) -> BVec4A {
423 BVec4A(f32x4_ne(self.0, self.0))
424 }
425
426 /// Computes the length of `self`.
427 #[doc(alias = "magnitude")]
428 #[inline]
429 #[must_use]
length(self) -> f32430 pub fn length(self) -> f32 {
431 let dot = dot4_in_x(self.0, self.0);
432 f32x4_extract_lane::<0>(f32x4_sqrt(dot))
433 }
434
435 /// Computes the squared length of `self`.
436 ///
437 /// This is faster than `length()` as it avoids a square root operation.
438 #[doc(alias = "magnitude2")]
439 #[inline]
440 #[must_use]
length_squared(self) -> f32441 pub fn length_squared(self) -> f32 {
442 self.dot(self)
443 }
444
445 /// Computes `1.0 / length()`.
446 ///
447 /// For valid results, `self` must _not_ be of length zero.
448 #[inline]
449 #[must_use]
length_recip(self) -> f32450 pub fn length_recip(self) -> f32 {
451 let dot = dot4_in_x(self.0, self.0);
452 f32x4_extract_lane::<0>(f32x4_div(Self::ONE.0, f32x4_sqrt(dot)))
453 }
454
455 /// Computes the Euclidean distance between two points in space.
456 #[inline]
457 #[must_use]
distance(self, rhs: Self) -> f32458 pub fn distance(self, rhs: Self) -> f32 {
459 (self - rhs).length()
460 }
461
462 /// Compute the squared euclidean distance between two points in space.
463 #[inline]
464 #[must_use]
distance_squared(self, rhs: Self) -> f32465 pub fn distance_squared(self, rhs: Self) -> f32 {
466 (self - rhs).length_squared()
467 }
468
469 /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
470 #[inline]
471 #[must_use]
div_euclid(self, rhs: Self) -> Self472 pub fn div_euclid(self, rhs: Self) -> Self {
473 Self::new(
474 math::div_euclid(self.x, rhs.x),
475 math::div_euclid(self.y, rhs.y),
476 math::div_euclid(self.z, rhs.z),
477 math::div_euclid(self.w, rhs.w),
478 )
479 }
480
481 /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
482 ///
483 /// [Euclidean division]: f32::rem_euclid
484 #[inline]
485 #[must_use]
rem_euclid(self, rhs: Self) -> Self486 pub fn rem_euclid(self, rhs: Self) -> Self {
487 Self::new(
488 math::rem_euclid(self.x, rhs.x),
489 math::rem_euclid(self.y, rhs.y),
490 math::rem_euclid(self.z, rhs.z),
491 math::rem_euclid(self.w, rhs.w),
492 )
493 }
494
495 /// Returns `self` normalized to length 1.0.
496 ///
497 /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
498 ///
499 /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
500 ///
501 /// Panics
502 ///
503 /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
504 #[inline]
505 #[must_use]
normalize(self) -> Self506 pub fn normalize(self) -> Self {
507 let length = f32x4_sqrt(dot4_into_v128(self.0, self.0));
508 #[allow(clippy::let_and_return)]
509 let normalized = Self(f32x4_div(self.0, length));
510 glam_assert!(normalized.is_finite());
511 normalized
512 }
513
514 /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
515 ///
516 /// In particular, if the input is zero (or very close to zero), or non-finite,
517 /// the result of this operation will be `None`.
518 ///
519 /// See also [`Self::normalize_or_zero()`].
520 #[inline]
521 #[must_use]
try_normalize(self) -> Option<Self>522 pub fn try_normalize(self) -> Option<Self> {
523 let rcp = self.length_recip();
524 if rcp.is_finite() && rcp > 0.0 {
525 Some(self * rcp)
526 } else {
527 None
528 }
529 }
530
531 /// Returns `self` normalized to length 1.0 if possible, else returns a
532 /// fallback value.
533 ///
534 /// In particular, if the input is zero (or very close to zero), or non-finite,
535 /// the result of this operation will be the fallback value.
536 ///
537 /// See also [`Self::try_normalize()`].
538 #[inline]
539 #[must_use]
normalize_or(self, fallback: Self) -> Self540 pub fn normalize_or(self, fallback: Self) -> Self {
541 let rcp = self.length_recip();
542 if rcp.is_finite() && rcp > 0.0 {
543 self * rcp
544 } else {
545 fallback
546 }
547 }
548
549 /// Returns `self` normalized to length 1.0 if possible, else returns zero.
550 ///
551 /// In particular, if the input is zero (or very close to zero), or non-finite,
552 /// the result of this operation will be zero.
553 ///
554 /// See also [`Self::try_normalize()`].
555 #[inline]
556 #[must_use]
normalize_or_zero(self) -> Self557 pub fn normalize_or_zero(self) -> Self {
558 self.normalize_or(Self::ZERO)
559 }
560
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // The comparison uses the squared length to avoid a sqrt. Near a
        // length of 1.0, a tolerance of 2e-4 on the squared length
        // corresponds to roughly 1e-4 on the length itself.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
569
570 /// Returns the vector projection of `self` onto `rhs`.
571 ///
572 /// `rhs` must be of non-zero length.
573 ///
574 /// # Panics
575 ///
576 /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
577 #[inline]
578 #[must_use]
project_onto(self, rhs: Self) -> Self579 pub fn project_onto(self, rhs: Self) -> Self {
580 let other_len_sq_rcp = rhs.dot(rhs).recip();
581 glam_assert!(other_len_sq_rcp.is_finite());
582 rhs * self.dot(rhs) * other_len_sq_rcp
583 }
584
585 /// Returns the vector rejection of `self` from `rhs`.
586 ///
587 /// The vector rejection is the vector perpendicular to the projection of `self` onto
588 /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`.
589 ///
590 /// `rhs` must be of non-zero length.
591 ///
592 /// # Panics
593 ///
594 /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
595 #[doc(alias("plane"))]
596 #[inline]
597 #[must_use]
reject_from(self, rhs: Self) -> Self598 pub fn reject_from(self, rhs: Self) -> Self {
599 self - self.project_onto(rhs)
600 }
601
602 /// Returns the vector projection of `self` onto `rhs`.
603 ///
604 /// `rhs` must be normalized.
605 ///
606 /// # Panics
607 ///
608 /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
609 #[inline]
610 #[must_use]
project_onto_normalized(self, rhs: Self) -> Self611 pub fn project_onto_normalized(self, rhs: Self) -> Self {
612 glam_assert!(rhs.is_normalized());
613 rhs * self.dot(rhs)
614 }
615
616 /// Returns the vector rejection of `self` from `rhs`.
617 ///
618 /// The vector rejection is the vector perpendicular to the projection of `self` onto
619 /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`.
620 ///
621 /// `rhs` must be normalized.
622 ///
623 /// # Panics
624 ///
625 /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
626 #[doc(alias("plane"))]
627 #[inline]
628 #[must_use]
reject_from_normalized(self, rhs: Self) -> Self629 pub fn reject_from_normalized(self, rhs: Self) -> Self {
630 self - self.project_onto_normalized(rhs)
631 }
632
633 /// Returns a vector containing the nearest integer to a number for each element of `self`.
634 /// Round half-way cases away from 0.0.
635 #[inline]
636 #[must_use]
round(self) -> Self637 pub fn round(self) -> Self {
638 Self(f32x4_nearest(self.0))
639 }
640
641 /// Returns a vector containing the largest integer less than or equal to a number for each
642 /// element of `self`.
643 #[inline]
644 #[must_use]
floor(self) -> Self645 pub fn floor(self) -> Self {
646 Self(f32x4_floor(self.0))
647 }
648
649 /// Returns a vector containing the smallest integer greater than or equal to a number for
650 /// each element of `self`.
651 #[inline]
652 #[must_use]
ceil(self) -> Self653 pub fn ceil(self) -> Self {
654 Self(f32x4_ceil(self.0))
655 }
656
657 /// Returns a vector containing the integer part each element of `self`. This means numbers are
658 /// always truncated towards zero.
659 #[inline]
660 #[must_use]
trunc(self) -> Self661 pub fn trunc(self) -> Self {
662 Self(f32x4_trunc(self.0))
663 }
664
665 /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
666 ///
667 /// Note that this differs from the GLSL implementation of `fract` which returns
668 /// `self - self.floor()`.
669 ///
670 /// Note that this is fast but not precise for large numbers.
671 #[inline]
672 #[must_use]
fract(self) -> Self673 pub fn fract(self) -> Self {
674 self - self.trunc()
675 }
676
677 /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
678 ///
679 /// Note that this differs from the Rust implementation of `fract` which returns
680 /// `self - self.trunc()`.
681 ///
682 /// Note that this is fast but not precise for large numbers.
683 #[inline]
684 #[must_use]
fract_gl(self) -> Self685 pub fn fract_gl(self) -> Self {
686 self - self.floor()
687 }
688
689 /// Returns a vector containing `e^self` (the exponential function) for each element of
690 /// `self`.
691 #[inline]
692 #[must_use]
exp(self) -> Self693 pub fn exp(self) -> Self {
694 Self::new(
695 math::exp(self.x),
696 math::exp(self.y),
697 math::exp(self.z),
698 math::exp(self.w),
699 )
700 }
701
702 /// Returns a vector containing each element of `self` raised to the power of `n`.
703 #[inline]
704 #[must_use]
powf(self, n: f32) -> Self705 pub fn powf(self, n: f32) -> Self {
706 Self::new(
707 math::powf(self.x, n),
708 math::powf(self.y, n),
709 math::powf(self.z, n),
710 math::powf(self.w, n),
711 )
712 }
713
714 /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
715 #[inline]
716 #[must_use]
recip(self) -> Self717 pub fn recip(self) -> Self {
718 Self(f32x4_div(Self::ONE.0, self.0))
719 }
720
721 /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
722 ///
723 /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
724 /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
725 /// extrapolated.
726 #[doc(alias = "mix")]
727 #[inline]
728 #[must_use]
lerp(self, rhs: Self, s: f32) -> Self729 pub fn lerp(self, rhs: Self, s: f32) -> Self {
730 self * (1.0 - s) + rhs * s
731 }
732
733 /// Moves towards `rhs` based on the value `d`.
734 ///
735 /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
736 /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
737 #[inline]
738 #[must_use]
move_towards(&self, rhs: Self, d: f32) -> Self739 pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
740 let a = rhs - *self;
741 let len = a.length();
742 if len <= d || len <= 1e-4 {
743 return rhs;
744 }
745 *self + a / len * d
746 }
747
748 /// Calculates the midpoint between `self` and `rhs`.
749 ///
750 /// The midpoint is the average of, or halfway point between, two vectors.
751 /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
752 /// while being slightly cheaper to compute.
753 #[inline]
midpoint(self, rhs: Self) -> Self754 pub fn midpoint(self, rhs: Self) -> Self {
755 (self + rhs) * 0.5
756 }
757
758 /// Returns true if the absolute difference of all elements between `self` and `rhs` is
759 /// less than or equal to `max_abs_diff`.
760 ///
761 /// This can be used to compare if two vectors contain similar elements. It works best when
762 /// comparing with a known value. The `max_abs_diff` that should be used used depends on
763 /// the values being compared against.
764 ///
765 /// For more see
766 /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
767 #[inline]
768 #[must_use]
abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool769 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
770 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
771 }
772
773 /// Returns a vector with a length no less than `min` and no more than `max`.
774 ///
775 /// # Panics
776 ///
777 /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
778 #[inline]
779 #[must_use]
clamp_length(self, min: f32, max: f32) -> Self780 pub fn clamp_length(self, min: f32, max: f32) -> Self {
781 glam_assert!(0.0 <= min);
782 glam_assert!(min <= max);
783 let length_sq = self.length_squared();
784 if length_sq < min * min {
785 min * (self / math::sqrt(length_sq))
786 } else if length_sq > max * max {
787 max * (self / math::sqrt(length_sq))
788 } else {
789 self
790 }
791 }
792
793 /// Returns a vector with a length no more than `max`.
794 ///
795 /// # Panics
796 ///
797 /// Will panic if `max` is negative when `glam_assert` is enabled.
798 #[inline]
799 #[must_use]
clamp_length_max(self, max: f32) -> Self800 pub fn clamp_length_max(self, max: f32) -> Self {
801 glam_assert!(0.0 <= max);
802 let length_sq = self.length_squared();
803 if length_sq > max * max {
804 max * (self / math::sqrt(length_sq))
805 } else {
806 self
807 }
808 }
809
810 /// Returns a vector with a length no less than `min`.
811 ///
812 /// # Panics
813 ///
814 /// Will panic if `min` is negative when `glam_assert` is enabled.
815 #[inline]
816 #[must_use]
clamp_length_min(self, min: f32) -> Self817 pub fn clamp_length_min(self, min: f32) -> Self {
818 glam_assert!(0.0 <= min);
819 let length_sq = self.length_squared();
820 if length_sq < min * min {
821 min * (self / math::sqrt(length_sq))
822 } else {
823 self
824 }
825 }
826
827 /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
828 /// error, yielding a more accurate result than an unfused multiply-add.
829 ///
830 /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
831 /// architecture has a dedicated fma CPU instruction. However, this is not always true,
832 /// and will be heavily dependant on designing algorithms with specific target hardware in
833 /// mind.
834 #[inline]
835 #[must_use]
mul_add(self, a: Self, b: Self) -> Self836 pub fn mul_add(self, a: Self, b: Self) -> Self {
837 Self::new(
838 math::mul_add(self.x, a.x, b.x),
839 math::mul_add(self.y, a.y, b.y),
840 math::mul_add(self.z, a.z, b.z),
841 math::mul_add(self.w, a.w, b.w),
842 )
843 }
844
845 /// Returns the reflection vector for a given incident vector `self` and surface normal
846 /// `normal`.
847 ///
848 /// `normal` must be normalized.
849 ///
850 /// # Panics
851 ///
852 /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
853 #[inline]
854 #[must_use]
reflect(self, normal: Self) -> Self855 pub fn reflect(self, normal: Self) -> Self {
856 glam_assert!(normal.is_normalized());
857 self - 2.0 * self.dot(normal) * normal
858 }
859
860 /// Returns the refraction direction for a given incident vector `self`, surface normal
861 /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
862 /// a zero vector will be returned.
863 ///
864 /// `self` and `normal` must be normalized.
865 ///
866 /// # Panics
867 ///
868 /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
869 #[inline]
870 #[must_use]
refract(self, normal: Self, eta: f32) -> Self871 pub fn refract(self, normal: Self, eta: f32) -> Self {
872 glam_assert!(self.is_normalized());
873 glam_assert!(normal.is_normalized());
874 let n_dot_i = normal.dot(self);
875 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
876 if k >= 0.0 {
877 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
878 } else {
879 Self::ZERO
880 }
881 }
882
883 /// Casts all elements of `self` to `f64`.
884 #[inline]
885 #[must_use]
as_dvec4(&self) -> crate::DVec4886 pub fn as_dvec4(&self) -> crate::DVec4 {
887 crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
888 }
889
890 /// Casts all elements of `self` to `i8`.
891 #[inline]
892 #[must_use]
as_i8vec4(&self) -> crate::I8Vec4893 pub fn as_i8vec4(&self) -> crate::I8Vec4 {
894 crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
895 }
896
897 /// Casts all elements of `self` to `u8`.
898 #[inline]
899 #[must_use]
as_u8vec4(&self) -> crate::U8Vec4900 pub fn as_u8vec4(&self) -> crate::U8Vec4 {
901 crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
902 }
903
904 /// Casts all elements of `self` to `i16`.
905 #[inline]
906 #[must_use]
as_i16vec4(&self) -> crate::I16Vec4907 pub fn as_i16vec4(&self) -> crate::I16Vec4 {
908 crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
909 }
910
911 /// Casts all elements of `self` to `u16`.
912 #[inline]
913 #[must_use]
as_u16vec4(&self) -> crate::U16Vec4914 pub fn as_u16vec4(&self) -> crate::U16Vec4 {
915 crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
916 }
917
918 /// Casts all elements of `self` to `i32`.
919 #[inline]
920 #[must_use]
as_ivec4(&self) -> crate::IVec4921 pub fn as_ivec4(&self) -> crate::IVec4 {
922 crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
923 }
924
925 /// Casts all elements of `self` to `u32`.
926 #[inline]
927 #[must_use]
as_uvec4(&self) -> crate::UVec4928 pub fn as_uvec4(&self) -> crate::UVec4 {
929 crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
930 }
931
932 /// Casts all elements of `self` to `i64`.
933 #[inline]
934 #[must_use]
as_i64vec4(&self) -> crate::I64Vec4935 pub fn as_i64vec4(&self) -> crate::I64Vec4 {
936 crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
937 }
938
939 /// Casts all elements of `self` to `u64`.
940 #[inline]
941 #[must_use]
as_u64vec4(&self) -> crate::U64Vec4942 pub fn as_u64vec4(&self) -> crate::U64Vec4 {
943 crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
944 }
945 }
946
947 impl Default for Vec4 {
948 #[inline(always)]
default() -> Self949 fn default() -> Self {
950 Self::ZERO
951 }
952 }
953
954 impl PartialEq for Vec4 {
955 #[inline]
eq(&self, rhs: &Self) -> bool956 fn eq(&self, rhs: &Self) -> bool {
957 self.cmpeq(*rhs).all()
958 }
959 }
960
// Component-wise `Vec4 / Vec4` (lane-wise SIMD division).
impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(f32x4_div(self.0, rhs.0))
    }
}

// Reference-operand variants: these dereference and forward to the
// by-value impls.
impl Div<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

// In-place `/=` with a vector right-hand side.
impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = f32x4_div(self.0, rhs.0);
    }
}

impl DivAssign<&Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Vec4) {
        self.div_assign(*rhs)
    }
}

// `Vec4 / f32`: the scalar divisor is splatted across all four lanes.
impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(f32x4_div(self.0, f32x4_splat(rhs)))
    }
}

impl Div<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = f32x4_div(self.0, f32x4_splat(rhs));
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs)
    }
}

// `f32 / Vec4`: the scalar becomes the dividend in every lane.
impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(f32x4_div(f32x4_splat(self), rhs.0))
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}
1084
// Component-wise `Vec4 * Vec4` (lane-wise SIMD multiplication).
impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(f32x4_mul(self.0, rhs.0))
    }
}

// Reference-operand variants: these dereference and forward to the
// by-value impls.
impl Mul<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

// In-place `*=` with a vector right-hand side.
impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = f32x4_mul(self.0, rhs.0);
    }
}

impl MulAssign<&Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Vec4) {
        self.mul_assign(*rhs)
    }
}

// `Vec4 * f32`: the scalar factor is splatted across all four lanes.
impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(f32x4_mul(self.0, f32x4_splat(rhs)))
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = f32x4_mul(self.0, f32x4_splat(rhs))
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs)
    }
}

// `f32 * Vec4`: commuted scalar multiplication, scalar in every lane.
impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(f32x4_mul(f32x4_splat(self), rhs.0))
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}
1208
// Component-wise `Vec4 + Vec4` (lane-wise SIMD addition).
impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(f32x4_add(self.0, rhs.0))
    }
}

// Reference-operand variants: these dereference and forward to the
// by-value impls.
impl Add<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

// In-place `+=` with a vector right-hand side.
impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = f32x4_add(self.0, rhs.0);
    }
}

impl AddAssign<&Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Vec4) {
        self.add_assign(*rhs)
    }
}

// `Vec4 + f32`: the scalar is splatted across all four lanes.
impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(f32x4_add(self.0, f32x4_splat(rhs)))
    }
}

impl Add<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = f32x4_add(self.0, f32x4_splat(rhs));
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs)
    }
}

// `f32 + Vec4`: commuted scalar addition, scalar in every lane.
impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(f32x4_add(f32x4_splat(self), rhs.0))
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}
1332
// Component-wise `Vec4 - Vec4` (lane-wise SIMD subtraction).
impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(f32x4_sub(self.0, rhs.0))
    }
}

// Reference-operand variants: these dereference and forward to the
// by-value impls.
impl Sub<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

// In-place `-=` with a vector right-hand side.
impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = f32x4_sub(self.0, rhs.0);
    }
}

impl SubAssign<&Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Vec4) {
        self.sub_assign(*rhs)
    }
}

// `Vec4 - f32`: the scalar is splatted across all four lanes.
impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(f32x4_sub(self.0, f32x4_splat(rhs)))
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = f32x4_sub(self.0, f32x4_splat(rhs))
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs)
    }
}

// `f32 - Vec4`: the scalar is the minuend in every lane.
impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(f32x4_sub(f32x4_splat(self), rhs.0))
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}
1456
// Component-wise `Vec4 % Vec4`, computed as `self - floor(self / rhs) * rhs`.
// NOTE(review): `f32x4_floor` makes this a *floored* modulo, which differs
// from Rust's truncated `%` on `f32` when the operands have mixed signs —
// presumably intentional for the SIMD backends of this template; confirm
// against the scalar implementation.
impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        let n = f32x4_floor(f32x4_div(self.0, rhs.0));
        Self(f32x4_sub(self.0, f32x4_mul(n, rhs.0)))
    }
}

// Reference-operand variants: these dereference and forward to the
// by-value impls.
impl Rem<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

// In-place `%=` with a vector right-hand side.
impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Vec4) {
        self.rem_assign(*rhs)
    }
}

// `Vec4 % f32`: splat the scalar divisor across all four lanes.
impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs)
    }
}

// `f32 % Vec4`: splat the scalar dividend across all four lanes.
impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}
1581
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over a 16-byte `v128`
        // holding four `f32` lanes, so it has the same size as `[f32; 4]`
        // and at least its alignment.
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as the `AsRef<[f32; 4]>` impl above;
        // the exclusive `&mut self` borrow guarantees the returned mutable
        // reference is unique.
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}
1597
impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        // Component-wise sum; an empty iterator yields `Vec4::ZERO`.
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        // Same as the owning impl; items are `Copy`, so `&b` just copies.
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        // Component-wise product; an empty iterator yields `Vec4::ONE`.
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        // Same as the owning impl; items are `Copy`, so `&b` just copies.
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1637
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // Lane-wise negation of all four components.
        Self(f32x4_neg(self.0))
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        // Dereference and forward to the by-value impl.
        (*self).neg()
    }
}
1653
impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        // Component access by position: 0 => x, 1 => y, 2 => z, 3 => w.
        // The field references go through `Deref` to `crate::deref::Vec4<f32>`.
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            // Panics for any index greater than 3.
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        // Mutable counterpart of `Index`; panics for any index greater than 3.
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1680
1681 impl fmt::Display for Vec4 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1682 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1683 if let Some(p) = f.precision() {
1684 write!(
1685 f,
1686 "[{:.*}, {:.*}, {:.*}, {:.*}]",
1687 p, self.x, p, self.y, p, self.z, p, self.w
1688 )
1689 } else {
1690 write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
1691 }
1692 }
1693 }
1694
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as `Vec4(x, y, z, w)` using each component's `Debug` output.
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1705
// Zero-cost unwrap of the inner SIMD register.
impl From<Vec4> for v128 {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

// Zero-cost wrap of a SIMD register. NOTE(review): the lanes are
// reinterpreted as four `f32`s; callers should pass `f32x4`-shaped data.
impl From<v128> for Vec4 {
    #[inline(always)]
    fn from(t: v128) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        // SAFETY: a `v128` holding four `f32` lanes has the same size as
        // `[f32; 4]` and at least its alignment, so reading through the cast
        // pointer is valid.
        unsafe { *(&v.0 as *const v128 as *const Self) }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        // SAFETY: sizes match (16 bytes) and `v128` alignment satisfies the
        // tuple's. NOTE(review): the layout of `(f32, f32, f32, f32)` is not
        // formally guaranteed to match `[f32; 4]` — confirm this assumption
        // is acceptable for the supported toolchains.
        unsafe { *(&v.0 as *const v128 as *const Self) }
    }
}
1747
// Convenience constructors from lower-dimensional vectors plus scalars.
impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        // `Vec3A` shares the SIMD layout, so `extend` fills in the `w` lane.
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

// First `Vec2` supplies x/y, second supplies z/w.
impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1789
// Provides `.x`/`.y`/`.z`/`.w` field access on the SIMD-backed `Vec4`.
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: assumes `crate::deref::Vec4<f32>` has the same layout as
        // the four packed `f32` lanes of `self.0` — TODO(review) confirm it
        // is declared with a compatible repr.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout assumption as `Deref` above; uniqueness of the
        // returned reference follows from the `&mut self` borrow.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1804
impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        // `f32::from(bool)` maps `true` to 1.0 and `false` to 0.0.
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        // `BVec4A` is a SIMD mask; round-trip through `[bool; 4]` to obtain
        // plain boolean lanes, then widen each to 0.0/1.0.
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}
1830