1 // Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3 #[cfg(not(feature = "scalar-math"))]
4 use crate::BVec4A;
5 use crate::{BVec4, I16Vec4, I64Vec2, I64Vec3, I8Vec4, IVec4, U16Vec4, U64Vec4, U8Vec4, UVec4};
6
7 use core::fmt;
8 use core::iter::{Product, Sum};
9 use core::{f32, ops::*};
10
11 /// Creates a 4-dimensional vector.
12 #[inline(always)]
13 #[must_use]
pub const fn i64vec4(x: i64, y: i64, z: i64, w: i64) -> I64Vec4 {
15 I64Vec4::new(x, y, z, w)
16 }
17
18 /// A 4-dimensional vector.
19 #[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
20 #[derive(Clone, Copy, PartialEq, Eq)]
21 #[cfg_attr(feature = "cuda", repr(align(16)))]
22 #[cfg_attr(not(target_arch = "spirv"), repr(C))]
23 #[cfg_attr(target_arch = "spirv", repr(simd))]
24 pub struct I64Vec4 {
25 pub x: i64,
26 pub y: i64,
27 pub z: i64,
28 pub w: i64,
29 }
30
31 impl I64Vec4 {
32 /// All zeroes.
33 pub const ZERO: Self = Self::splat(0);
34
35 /// All ones.
36 pub const ONE: Self = Self::splat(1);
37
38 /// All negative ones.
39 pub const NEG_ONE: Self = Self::splat(-1);
40
41 /// All `i64::MIN`.
42 pub const MIN: Self = Self::splat(i64::MIN);
43
44 /// All `i64::MAX`.
45 pub const MAX: Self = Self::splat(i64::MAX);
46
47 /// A unit vector pointing along the positive X axis.
48 pub const X: Self = Self::new(1, 0, 0, 0);
49
50 /// A unit vector pointing along the positive Y axis.
51 pub const Y: Self = Self::new(0, 1, 0, 0);
52
53 /// A unit vector pointing along the positive Z axis.
54 pub const Z: Self = Self::new(0, 0, 1, 0);
55
56 /// A unit vector pointing along the positive W axis.
57 pub const W: Self = Self::new(0, 0, 0, 1);
58
59 /// A unit vector pointing along the negative X axis.
60 pub const NEG_X: Self = Self::new(-1, 0, 0, 0);
61
62 /// A unit vector pointing along the negative Y axis.
63 pub const NEG_Y: Self = Self::new(0, -1, 0, 0);
64
65 /// A unit vector pointing along the negative Z axis.
66 pub const NEG_Z: Self = Self::new(0, 0, -1, 0);
67
68 /// A unit vector pointing along the negative W axis.
69 pub const NEG_W: Self = Self::new(0, 0, 0, -1);
70
71 /// The unit axes.
72 pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
73
74 /// Creates a new vector.
75 #[inline(always)]
76 #[must_use]
    pub const fn new(x: i64, y: i64, z: i64, w: i64) -> Self {
78 Self { x, y, z, w }
79 }
80
81 /// Creates a vector with all elements set to `v`.
82 #[inline]
83 #[must_use]
    pub const fn splat(v: i64) -> Self {
85 Self {
86 x: v,
87
88 y: v,
89
90 z: v,
91
92 w: v,
93 }
94 }
95
96 /// Returns a vector containing each element of `self` modified by a mapping function `f`.
97 #[inline]
98 #[must_use]
    pub fn map<F>(self, f: F) -> Self
100 where
101 F: Fn(i64) -> i64,
102 {
103 Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
104 }
105
106 /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
107 /// for each element of `self`.
108 ///
109 /// A true element in the mask uses the corresponding element from `if_true`, and false
110 /// uses the element from `if_false`.
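    ///
    /// A minimal usage sketch (an illustrative addition, assuming this file builds as part of the `glam` crate):
    ///
    /// ```
    /// # use glam::{BVec4, I64Vec4};
    /// let mask = BVec4::new(true, false, true, false);
    /// // Lanes 0 and 2 come from `if_true`, lanes 1 and 3 from `if_false`.
    /// assert_eq!(
    ///     I64Vec4::select(mask, I64Vec4::ONE, I64Vec4::ZERO),
    ///     I64Vec4::new(1, 0, 1, 0)
    /// );
    /// ```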
111 #[inline]
112 #[must_use]
    pub fn select(mask: BVec4, if_true: Self, if_false: Self) -> Self {
114 Self {
115 x: if mask.test(0) { if_true.x } else { if_false.x },
116 y: if mask.test(1) { if_true.y } else { if_false.y },
117 z: if mask.test(2) { if_true.z } else { if_false.z },
118 w: if mask.test(3) { if_true.w } else { if_false.w },
119 }
120 }
121
122 /// Creates a new vector from an array.
123 #[inline]
124 #[must_use]
    pub const fn from_array(a: [i64; 4]) -> Self {
126 Self::new(a[0], a[1], a[2], a[3])
127 }
128
129 /// `[x, y, z, w]`
130 #[inline]
131 #[must_use]
    pub const fn to_array(&self) -> [i64; 4] {
133 [self.x, self.y, self.z, self.w]
134 }
135
136 /// Creates a vector from the first 4 values in `slice`.
137 ///
138 /// # Panics
139 ///
140 /// Panics if `slice` is less than 4 elements long.
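    ///
    /// A short doc-test sketch (illustrative; assumes the surrounding crate is `glam`):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let data = [1_i64, 2, 3, 4, 5];
    /// // Only the first four values are read.
    /// assert_eq!(I64Vec4::from_slice(&data), I64Vec4::new(1, 2, 3, 4));
    /// ```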
141 #[inline]
142 #[must_use]
    pub const fn from_slice(slice: &[i64]) -> Self {
144 assert!(slice.len() >= 4);
145 Self::new(slice[0], slice[1], slice[2], slice[3])
146 }
147
148 /// Writes the elements of `self` to the first 4 elements in `slice`.
149 ///
150 /// # Panics
151 ///
152 /// Panics if `slice` is less than 4 elements long.
153 #[inline]
    pub fn write_to_slice(self, slice: &mut [i64]) {
155 slice[..4].copy_from_slice(&self.to_array());
156 }
157
158 /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
159 ///
160 /// Truncation to [`I64Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
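    ///
    /// Illustrative sketch (assumes the crate is `glam`):
    ///
    /// ```
    /// # use glam::{I64Vec3, I64Vec4};
    /// assert_eq!(I64Vec4::new(1, 2, 3, 4).truncate(), I64Vec3::new(1, 2, 3));
    /// ```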
161 #[inline]
162 #[must_use]
    pub fn truncate(self) -> I64Vec3 {
164 use crate::swizzles::Vec4Swizzles;
165 self.xyz()
166 }
167
168 /// Creates a 4D vector from `self` with the given value of `x`.
169 #[inline]
170 #[must_use]
    pub fn with_x(mut self, x: i64) -> Self {
172 self.x = x;
173 self
174 }
175
176 /// Creates a 4D vector from `self` with the given value of `y`.
177 #[inline]
178 #[must_use]
    pub fn with_y(mut self, y: i64) -> Self {
180 self.y = y;
181 self
182 }
183
184 /// Creates a 4D vector from `self` with the given value of `z`.
185 #[inline]
186 #[must_use]
    pub fn with_z(mut self, z: i64) -> Self {
188 self.z = z;
189 self
190 }
191
192 /// Creates a 4D vector from `self` with the given value of `w`.
193 #[inline]
194 #[must_use]
    pub fn with_w(mut self, w: i64) -> Self {
196 self.w = w;
197 self
198 }
199
200 /// Computes the dot product of `self` and `rhs`.
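    ///
    /// For example (an illustrative sketch assuming the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// // 1*5 + 2*6 + 3*7 + 4*8 = 70
    /// assert_eq!(I64Vec4::new(1, 2, 3, 4).dot(I64Vec4::new(5, 6, 7, 8)), 70);
    /// ```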
201 #[inline]
202 #[must_use]
    pub fn dot(self, rhs: Self) -> i64 {
204 (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + (self.w * rhs.w)
205 }
206
207 /// Returns a vector where every component is the dot product of `self` and `rhs`.
208 #[inline]
209 #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
211 Self::splat(self.dot(rhs))
212 }
213
214 /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
215 ///
216 /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
217 #[inline]
218 #[must_use]
    pub fn min(self, rhs: Self) -> Self {
220 Self {
221 x: self.x.min(rhs.x),
222 y: self.y.min(rhs.y),
223 z: self.z.min(rhs.z),
224 w: self.w.min(rhs.w),
225 }
226 }
227
228 /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
229 ///
230 /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
231 #[inline]
232 #[must_use]
    pub fn max(self, rhs: Self) -> Self {
234 Self {
235 x: self.x.max(rhs.x),
236 y: self.y.max(rhs.y),
237 z: self.z.max(rhs.z),
238 w: self.w.max(rhs.w),
239 }
240 }
241
242 /// Component-wise clamping of values, similar to [`i64::clamp`].
243 ///
244 /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
245 ///
246 /// # Panics
247 ///
248 /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
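    ///
    /// A small sketch of the component-wise behaviour (illustrative; assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let v = I64Vec4::new(-5, 0, 5, 10);
    /// assert_eq!(
    ///     v.clamp(I64Vec4::splat(-1), I64Vec4::splat(3)),
    ///     I64Vec4::new(-1, 0, 3, 3)
    /// );
    /// ```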
249 #[inline]
250 #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
252 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
253 self.max(min).min(max)
254 }
255
256 /// Returns the horizontal minimum of `self`.
257 ///
258 /// In other words this computes `min(x, y, ..)`.
259 #[inline]
260 #[must_use]
    pub fn min_element(self) -> i64 {
262 self.x.min(self.y.min(self.z.min(self.w)))
263 }
264
265 /// Returns the horizontal maximum of `self`.
266 ///
267 /// In other words this computes `max(x, y, ..)`.
268 #[inline]
269 #[must_use]
    pub fn max_element(self) -> i64 {
271 self.x.max(self.y.max(self.z.max(self.w)))
272 }
273
274 /// Returns the sum of all elements of `self`.
275 ///
276 /// In other words, this computes `self.x + self.y + ..`.
277 #[inline]
278 #[must_use]
    pub fn element_sum(self) -> i64 {
280 self.x + self.y + self.z + self.w
281 }
282
283 /// Returns the product of all elements of `self`.
284 ///
285 /// In other words, this computes `self.x * self.y * ..`.
286 #[inline]
287 #[must_use]
    pub fn element_product(self) -> i64 {
289 self.x * self.y * self.z * self.w
290 }
291
292 /// Returns a vector mask containing the result of a `==` comparison for each element of
293 /// `self` and `rhs`.
294 ///
295 /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
296 /// elements.
297 #[inline]
298 #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4 {
300 BVec4::new(
301 self.x.eq(&rhs.x),
302 self.y.eq(&rhs.y),
303 self.z.eq(&rhs.z),
304 self.w.eq(&rhs.w),
305 )
306 }
307
308 /// Returns a vector mask containing the result of a `!=` comparison for each element of
309 /// `self` and `rhs`.
310 ///
311 /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
312 /// elements.
313 #[inline]
314 #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4 {
316 BVec4::new(
317 self.x.ne(&rhs.x),
318 self.y.ne(&rhs.y),
319 self.z.ne(&rhs.z),
320 self.w.ne(&rhs.w),
321 )
322 }
323
324 /// Returns a vector mask containing the result of a `>=` comparison for each element of
325 /// `self` and `rhs`.
326 ///
327 /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
328 /// elements.
329 #[inline]
330 #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4 {
332 BVec4::new(
333 self.x.ge(&rhs.x),
334 self.y.ge(&rhs.y),
335 self.z.ge(&rhs.z),
336 self.w.ge(&rhs.w),
337 )
338 }
339
340 /// Returns a vector mask containing the result of a `>` comparison for each element of
341 /// `self` and `rhs`.
342 ///
343 /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
344 /// elements.
345 #[inline]
346 #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4 {
348 BVec4::new(
349 self.x.gt(&rhs.x),
350 self.y.gt(&rhs.y),
351 self.z.gt(&rhs.z),
352 self.w.gt(&rhs.w),
353 )
354 }
355
356 /// Returns a vector mask containing the result of a `<=` comparison for each element of
357 /// `self` and `rhs`.
358 ///
359 /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
360 /// elements.
361 #[inline]
362 #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4 {
364 BVec4::new(
365 self.x.le(&rhs.x),
366 self.y.le(&rhs.y),
367 self.z.le(&rhs.z),
368 self.w.le(&rhs.w),
369 )
370 }
371
372 /// Returns a vector mask containing the result of a `<` comparison for each element of
373 /// `self` and `rhs`.
374 ///
375 /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
376 /// elements.
377 #[inline]
378 #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4 {
380 BVec4::new(
381 self.x.lt(&rhs.x),
382 self.y.lt(&rhs.y),
383 self.z.lt(&rhs.z),
384 self.w.lt(&rhs.w),
385 )
386 }
387
388 /// Returns a vector containing the absolute value of each element of `self`.
389 #[inline]
390 #[must_use]
    pub fn abs(self) -> Self {
392 Self {
393 x: self.x.abs(),
394 y: self.y.abs(),
395 z: self.z.abs(),
396 w: self.w.abs(),
397 }
398 }
399
400 /// Returns a vector with elements representing the sign of `self`.
401 ///
402 /// - `0` if the number is zero
403 /// - `1` if the number is positive
404 /// - `-1` if the number is negative
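    ///
    /// Illustrative sketch (assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// assert_eq!(I64Vec4::new(-7, 0, 3, -1).signum(), I64Vec4::new(-1, 0, 1, -1));
    /// ```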
405 #[inline]
406 #[must_use]
    pub fn signum(self) -> Self {
408 Self {
409 x: self.x.signum(),
410 y: self.y.signum(),
411 z: self.z.signum(),
412 w: self.w.signum(),
413 }
414 }
415
416 /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
417 ///
418 /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes
419 /// into the first lowest bit, element `y` into the second, etc.
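    ///
    /// A quick sketch of the bit layout (illustrative; assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// // `x` (negative) sets bit 0 and `w` (negative) sets bit 3: 0b1001.
    /// assert_eq!(I64Vec4::new(-1, 2, 3, -4).is_negative_bitmask(), 0b1001);
    /// ```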
420 #[inline]
421 #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
423 (self.x.is_negative() as u32)
424 | (self.y.is_negative() as u32) << 1
425 | (self.z.is_negative() as u32) << 2
426 | (self.w.is_negative() as u32) << 3
427 }
428
429 /// Computes the squared length of `self`.
430 #[doc(alias = "magnitude2")]
431 #[inline]
432 #[must_use]
    pub fn length_squared(self) -> i64 {
434 self.dot(self)
435 }
436
437 /// Compute the squared euclidean distance between two points in space.
438 #[inline]
439 #[must_use]
    pub fn distance_squared(self, rhs: Self) -> i64 {
441 (self - rhs).length_squared()
442 }
443
444 /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
445 ///
446 /// # Panics
    /// This function will panic if any `rhs` element is 0 or the division results in overflow.
    ///
    /// [Euclidean division]: i64::div_euclid
448 #[inline]
449 #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
451 Self::new(
452 self.x.div_euclid(rhs.x),
453 self.y.div_euclid(rhs.y),
454 self.z.div_euclid(rhs.z),
455 self.w.div_euclid(rhs.w),
456 )
457 }
458
459 /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
460 ///
461 /// # Panics
462 /// This function will panic if any `rhs` element is 0 or the division results in overflow.
463 ///
464 /// [Euclidean division]: i64::rem_euclid
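    ///
    /// A short sketch of how this differs from `%` for negative values (illustrative; assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let v = I64Vec4::new(-7, 7, -1, 0);
    /// // The Euclidean remainder is never negative for a positive divisor.
    /// assert_eq!(v.rem_euclid(I64Vec4::splat(4)), I64Vec4::new(1, 3, 3, 0));
    /// ```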
465 #[inline]
466 #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
468 Self::new(
469 self.x.rem_euclid(rhs.x),
470 self.y.rem_euclid(rhs.y),
471 self.z.rem_euclid(rhs.z),
472 self.w.rem_euclid(rhs.w),
473 )
474 }
475
476 /// Computes the [manhattan distance] between two points.
477 ///
478 /// # Overflow
479 /// This method may overflow if the result is greater than [`u64::MAX`].
480 ///
481 /// See also [`checked_manhattan_distance`][I64Vec4::checked_manhattan_distance].
482 ///
483 /// [manhattan distance]: https://en.wikipedia.org/wiki/Taxicab_geometry
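    ///
    /// Illustrative sketch (assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let a = I64Vec4::new(1, 2, 3, 4);
    /// let b = I64Vec4::new(4, 0, 3, -1);
    /// // |1-4| + |2-0| + |3-3| + |4-(-1)| = 3 + 2 + 0 + 5
    /// assert_eq!(a.manhattan_distance(b), 10);
    /// ```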
484 #[inline]
485 #[must_use]
    pub fn manhattan_distance(self, other: Self) -> u64 {
487 self.x.abs_diff(other.x)
488 + self.y.abs_diff(other.y)
489 + self.z.abs_diff(other.z)
490 + self.w.abs_diff(other.w)
491 }
492
493 /// Computes the [manhattan distance] between two points.
494 ///
    /// This will return [`None`] if the result is greater than [`u64::MAX`].
496 ///
497 /// [manhattan distance]: https://en.wikipedia.org/wiki/Taxicab_geometry
498 #[inline]
499 #[must_use]
    pub fn checked_manhattan_distance(self, other: Self) -> Option<u64> {
501 let d = self.x.abs_diff(other.x);
502 let d = d.checked_add(self.y.abs_diff(other.y))?;
503 let d = d.checked_add(self.z.abs_diff(other.z))?;
504 d.checked_add(self.w.abs_diff(other.w))
505 }
506
507 /// Computes the [chebyshev distance] between two points.
508 ///
509 /// [chebyshev distance]: https://en.wikipedia.org/wiki/Chebyshev_distance
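    ///
    /// Illustrative sketch (assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// // The largest per-component absolute difference is |4 - (-1)| = 5.
    /// assert_eq!(
    ///     I64Vec4::new(1, 2, 3, 4).chebyshev_distance(I64Vec4::new(4, 0, 3, -1)),
    ///     5
    /// );
    /// ```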
510 #[inline]
511 #[must_use]
    pub fn chebyshev_distance(self, other: Self) -> u64 {
513 // Note: the compiler will eventually optimize out the loop
514 [
515 self.x.abs_diff(other.x),
516 self.y.abs_diff(other.y),
517 self.z.abs_diff(other.z),
518 self.w.abs_diff(other.w),
519 ]
520 .into_iter()
521 .max()
522 .unwrap()
523 }
524
525 /// Casts all elements of `self` to `f32`.
526 #[inline]
527 #[must_use]
    pub fn as_vec4(&self) -> crate::Vec4 {
529 crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32)
530 }
531
532 /// Casts all elements of `self` to `f64`.
533 #[inline]
534 #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
536 crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
537 }
538
539 /// Casts all elements of `self` to `i8`.
540 #[inline]
541 #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
543 crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
544 }
545
546 /// Casts all elements of `self` to `u8`.
547 #[inline]
548 #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
550 crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
551 }
552
553 /// Casts all elements of `self` to `i16`.
554 #[inline]
555 #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
557 crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
558 }
559
560 /// Casts all elements of `self` to `u16`.
561 #[inline]
562 #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
564 crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
565 }
566
567 /// Casts all elements of `self` to `i32`.
568 #[inline]
569 #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
571 crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
572 }
573
574 /// Casts all elements of `self` to `u32`.
575 #[inline]
576 #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
578 crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
579 }
580
581 /// Casts all elements of `self` to `u64`.
582 #[inline]
583 #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
585 crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
586 }
587
    /// Returns a vector containing the checked addition of `self` and `rhs`.
589 ///
590 /// In other words this computes `Some([self.x + rhs.x, self.y + rhs.y, ..])` but returns `None` on any overflow.
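    ///
    /// A small sketch of the overflow behaviour (illustrative; assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let v = I64Vec4::new(1, 2, 3, i64::MAX);
    /// assert_eq!(v.checked_add(I64Vec4::ZERO), Some(v));
    /// // Adding one overflows the `w` lane, so the whole result is `None`.
    /// assert_eq!(v.checked_add(I64Vec4::ONE), None);
    /// ```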
591 #[inline]
592 #[must_use]
    pub const fn checked_add(self, rhs: Self) -> Option<Self> {
594 let x = match self.x.checked_add(rhs.x) {
595 Some(v) => v,
596 None => return None,
597 };
598 let y = match self.y.checked_add(rhs.y) {
599 Some(v) => v,
600 None => return None,
601 };
602 let z = match self.z.checked_add(rhs.z) {
603 Some(v) => v,
604 None => return None,
605 };
606 let w = match self.w.checked_add(rhs.w) {
607 Some(v) => v,
608 None => return None,
609 };
610
611 Some(Self { x, y, z, w })
612 }
613
    /// Returns a vector containing the checked subtraction of `self` and `rhs`.
615 ///
616 /// In other words this computes `Some([self.x - rhs.x, self.y - rhs.y, ..])` but returns `None` on any overflow.
617 #[inline]
618 #[must_use]
    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
620 let x = match self.x.checked_sub(rhs.x) {
621 Some(v) => v,
622 None => return None,
623 };
624 let y = match self.y.checked_sub(rhs.y) {
625 Some(v) => v,
626 None => return None,
627 };
628 let z = match self.z.checked_sub(rhs.z) {
629 Some(v) => v,
630 None => return None,
631 };
632 let w = match self.w.checked_sub(rhs.w) {
633 Some(v) => v,
634 None => return None,
635 };
636
637 Some(Self { x, y, z, w })
638 }
639
    /// Returns a vector containing the checked multiplication of `self` and `rhs`.
641 ///
642 /// In other words this computes `Some([self.x * rhs.x, self.y * rhs.y, ..])` but returns `None` on any overflow.
643 #[inline]
644 #[must_use]
    pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
646 let x = match self.x.checked_mul(rhs.x) {
647 Some(v) => v,
648 None => return None,
649 };
650 let y = match self.y.checked_mul(rhs.y) {
651 Some(v) => v,
652 None => return None,
653 };
654 let z = match self.z.checked_mul(rhs.z) {
655 Some(v) => v,
656 None => return None,
657 };
658 let w = match self.w.checked_mul(rhs.w) {
659 Some(v) => v,
660 None => return None,
661 };
662
663 Some(Self { x, y, z, w })
664 }
665
    /// Returns a vector containing the checked division of `self` and `rhs`.
    ///
    /// In other words this computes `Some([self.x / rhs.x, self.y / rhs.y, ..])` but returns `None` on any division by zero or overflow.
669 #[inline]
670 #[must_use]
    pub const fn checked_div(self, rhs: Self) -> Option<Self> {
672 let x = match self.x.checked_div(rhs.x) {
673 Some(v) => v,
674 None => return None,
675 };
676 let y = match self.y.checked_div(rhs.y) {
677 Some(v) => v,
678 None => return None,
679 };
680 let z = match self.z.checked_div(rhs.z) {
681 Some(v) => v,
682 None => return None,
683 };
684 let w = match self.w.checked_div(rhs.w) {
685 Some(v) => v,
686 None => return None,
687 };
688
689 Some(Self { x, y, z, w })
690 }
691
692 /// Returns a vector containing the wrapping addition of `self` and `rhs`.
693 ///
694 /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`.
695 #[inline]
696 #[must_use]
    pub const fn wrapping_add(self, rhs: Self) -> Self {
698 Self {
699 x: self.x.wrapping_add(rhs.x),
700 y: self.y.wrapping_add(rhs.y),
701 z: self.z.wrapping_add(rhs.z),
702 w: self.w.wrapping_add(rhs.w),
703 }
704 }
705
706 /// Returns a vector containing the wrapping subtraction of `self` and `rhs`.
707 ///
708 /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`.
709 #[inline]
710 #[must_use]
    pub const fn wrapping_sub(self, rhs: Self) -> Self {
712 Self {
713 x: self.x.wrapping_sub(rhs.x),
714 y: self.y.wrapping_sub(rhs.y),
715 z: self.z.wrapping_sub(rhs.z),
716 w: self.w.wrapping_sub(rhs.w),
717 }
718 }
719
720 /// Returns a vector containing the wrapping multiplication of `self` and `rhs`.
721 ///
722 /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`.
723 #[inline]
724 #[must_use]
    pub const fn wrapping_mul(self, rhs: Self) -> Self {
726 Self {
727 x: self.x.wrapping_mul(rhs.x),
728 y: self.y.wrapping_mul(rhs.y),
729 z: self.z.wrapping_mul(rhs.z),
730 w: self.w.wrapping_mul(rhs.w),
731 }
732 }
733
734 /// Returns a vector containing the wrapping division of `self` and `rhs`.
735 ///
736 /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`.
737 #[inline]
738 #[must_use]
    pub const fn wrapping_div(self, rhs: Self) -> Self {
740 Self {
741 x: self.x.wrapping_div(rhs.x),
742 y: self.y.wrapping_div(rhs.y),
743 z: self.z.wrapping_div(rhs.z),
744 w: self.w.wrapping_div(rhs.w),
745 }
746 }
747
748 /// Returns a vector containing the saturating addition of `self` and `rhs`.
749 ///
750 /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`.
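    ///
    /// A small sketch of the saturating behaviour (illustrative; assumes the `glam` crate):
    ///
    /// ```
    /// # use glam::I64Vec4;
    /// let v = I64Vec4::new(i64::MAX, 1, -2, i64::MIN);
    /// assert_eq!(
    ///     v.saturating_add(I64Vec4::new(1, 1, 1, -1)),
    ///     I64Vec4::new(i64::MAX, 2, -1, i64::MIN)
    /// );
    /// ```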
751 #[inline]
752 #[must_use]
    pub const fn saturating_add(self, rhs: Self) -> Self {
754 Self {
755 x: self.x.saturating_add(rhs.x),
756 y: self.y.saturating_add(rhs.y),
757 z: self.z.saturating_add(rhs.z),
758 w: self.w.saturating_add(rhs.w),
759 }
760 }
761
762 /// Returns a vector containing the saturating subtraction of `self` and `rhs`.
763 ///
764 /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`.
765 #[inline]
766 #[must_use]
    pub const fn saturating_sub(self, rhs: Self) -> Self {
768 Self {
769 x: self.x.saturating_sub(rhs.x),
770 y: self.y.saturating_sub(rhs.y),
771 z: self.z.saturating_sub(rhs.z),
772 w: self.w.saturating_sub(rhs.w),
773 }
774 }
775
776 /// Returns a vector containing the saturating multiplication of `self` and `rhs`.
777 ///
778 /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`.
779 #[inline]
780 #[must_use]
    pub const fn saturating_mul(self, rhs: Self) -> Self {
782 Self {
783 x: self.x.saturating_mul(rhs.x),
784 y: self.y.saturating_mul(rhs.y),
785 z: self.z.saturating_mul(rhs.z),
786 w: self.w.saturating_mul(rhs.w),
787 }
788 }
789
790 /// Returns a vector containing the saturating division of `self` and `rhs`.
791 ///
792 /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`.
793 #[inline]
794 #[must_use]
    pub const fn saturating_div(self, rhs: Self) -> Self {
796 Self {
797 x: self.x.saturating_div(rhs.x),
798 y: self.y.saturating_div(rhs.y),
799 z: self.z.saturating_div(rhs.z),
800 w: self.w.saturating_div(rhs.w),
801 }
802 }
803
    /// Returns a vector containing the checked addition of `self` and unsigned vector `rhs`.
805 ///
806 /// In other words this computes `Some([self.x + rhs.x, self.y + rhs.y, ..])` but returns `None` on any overflow.
807 #[inline]
808 #[must_use]
    pub const fn checked_add_unsigned(self, rhs: U64Vec4) -> Option<Self> {
810 let x = match self.x.checked_add_unsigned(rhs.x) {
811 Some(v) => v,
812 None => return None,
813 };
814 let y = match self.y.checked_add_unsigned(rhs.y) {
815 Some(v) => v,
816 None => return None,
817 };
818 let z = match self.z.checked_add_unsigned(rhs.z) {
819 Some(v) => v,
820 None => return None,
821 };
822 let w = match self.w.checked_add_unsigned(rhs.w) {
823 Some(v) => v,
824 None => return None,
825 };
826
827 Some(Self { x, y, z, w })
828 }
829
    /// Returns a vector containing the checked subtraction of `self` and unsigned vector `rhs`.
831 ///
832 /// In other words this computes `Some([self.x - rhs.x, self.y - rhs.y, ..])` but returns `None` on any overflow.
833 #[inline]
834 #[must_use]
    pub const fn checked_sub_unsigned(self, rhs: U64Vec4) -> Option<Self> {
836 let x = match self.x.checked_sub_unsigned(rhs.x) {
837 Some(v) => v,
838 None => return None,
839 };
840 let y = match self.y.checked_sub_unsigned(rhs.y) {
841 Some(v) => v,
842 None => return None,
843 };
844 let z = match self.z.checked_sub_unsigned(rhs.z) {
845 Some(v) => v,
846 None => return None,
847 };
848 let w = match self.w.checked_sub_unsigned(rhs.w) {
849 Some(v) => v,
850 None => return None,
851 };
852
853 Some(Self { x, y, z, w })
854 }
855
856 /// Returns a vector containing the wrapping addition of `self` and unsigned vector `rhs`.
857 ///
858 /// In other words this computes `[self.x.wrapping_add_unsigned(rhs.x), self.y.wrapping_add_unsigned(rhs.y), ..]`.
859 #[inline]
860 #[must_use]
    pub const fn wrapping_add_unsigned(self, rhs: U64Vec4) -> Self {
862 Self {
863 x: self.x.wrapping_add_unsigned(rhs.x),
864 y: self.y.wrapping_add_unsigned(rhs.y),
865 z: self.z.wrapping_add_unsigned(rhs.z),
866 w: self.w.wrapping_add_unsigned(rhs.w),
867 }
868 }
869
870 /// Returns a vector containing the wrapping subtraction of `self` and unsigned vector `rhs`.
871 ///
872 /// In other words this computes `[self.x.wrapping_sub_unsigned(rhs.x), self.y.wrapping_sub_unsigned(rhs.y), ..]`.
873 #[inline]
874 #[must_use]
    pub const fn wrapping_sub_unsigned(self, rhs: U64Vec4) -> Self {
876 Self {
877 x: self.x.wrapping_sub_unsigned(rhs.x),
878 y: self.y.wrapping_sub_unsigned(rhs.y),
879 z: self.z.wrapping_sub_unsigned(rhs.z),
880 w: self.w.wrapping_sub_unsigned(rhs.w),
881 }
882 }
883
    /// Returns a vector containing the saturating addition of `self` and unsigned vector `rhs`.
885 ///
886 /// In other words this computes `[self.x.saturating_add_unsigned(rhs.x), self.y.saturating_add_unsigned(rhs.y), ..]`.
887 #[inline]
888 #[must_use]
    pub const fn saturating_add_unsigned(self, rhs: U64Vec4) -> Self {
890 Self {
891 x: self.x.saturating_add_unsigned(rhs.x),
892 y: self.y.saturating_add_unsigned(rhs.y),
893 z: self.z.saturating_add_unsigned(rhs.z),
894 w: self.w.saturating_add_unsigned(rhs.w),
895 }
896 }
897
898 /// Returns a vector containing the saturating subtraction of `self` and unsigned vector `rhs`.
899 ///
900 /// In other words this computes `[self.x.saturating_sub_unsigned(rhs.x), self.y.saturating_sub_unsigned(rhs.y), ..]`.
901 #[inline]
902 #[must_use]
    pub const fn saturating_sub_unsigned(self, rhs: U64Vec4) -> Self {
904 Self {
905 x: self.x.saturating_sub_unsigned(rhs.x),
906 y: self.y.saturating_sub_unsigned(rhs.y),
907 z: self.z.saturating_sub_unsigned(rhs.z),
908 w: self.w.saturating_sub_unsigned(rhs.w),
909 }
910 }
911 }
912
913 impl Default for I64Vec4 {
914 #[inline(always)]
    fn default() -> Self {
916 Self::ZERO
917 }
918 }
919
920 impl Div<I64Vec4> for I64Vec4 {
921 type Output = Self;
922 #[inline]
    fn div(self, rhs: Self) -> Self {
924 Self {
925 x: self.x.div(rhs.x),
926 y: self.y.div(rhs.y),
927 z: self.z.div(rhs.z),
928 w: self.w.div(rhs.w),
929 }
930 }
931 }
932
933 impl Div<&I64Vec4> for I64Vec4 {
934 type Output = I64Vec4;
935 #[inline]
div(self, rhs: &I64Vec4) -> I64Vec4936 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
937 self.div(*rhs)
938 }
939 }
940
941 impl Div<&I64Vec4> for &I64Vec4 {
942 type Output = I64Vec4;
943 #[inline]
div(self, rhs: &I64Vec4) -> I64Vec4944 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
945 (*self).div(*rhs)
946 }
947 }
948
949 impl Div<I64Vec4> for &I64Vec4 {
950 type Output = I64Vec4;
951 #[inline]
div(self, rhs: I64Vec4) -> I64Vec4952 fn div(self, rhs: I64Vec4) -> I64Vec4 {
953 (*self).div(rhs)
954 }
955 }
956
957 impl DivAssign<I64Vec4> for I64Vec4 {
958 #[inline]
    fn div_assign(&mut self, rhs: Self) {
960 self.x.div_assign(rhs.x);
961 self.y.div_assign(rhs.y);
962 self.z.div_assign(rhs.z);
963 self.w.div_assign(rhs.w);
964 }
965 }
966
967 impl DivAssign<&I64Vec4> for I64Vec4 {
968 #[inline]
div_assign(&mut self, rhs: &I64Vec4)969 fn div_assign(&mut self, rhs: &I64Vec4) {
970 self.div_assign(*rhs)
971 }
972 }
973
974 impl Div<i64> for I64Vec4 {
975 type Output = Self;
976 #[inline]
div(self, rhs: i64) -> Self977 fn div(self, rhs: i64) -> Self {
978 Self {
979 x: self.x.div(rhs),
980 y: self.y.div(rhs),
981 z: self.z.div(rhs),
982 w: self.w.div(rhs),
983 }
984 }
985 }
986
987 impl Div<&i64> for I64Vec4 {
988 type Output = I64Vec4;
989 #[inline]
div(self, rhs: &i64) -> I64Vec4990 fn div(self, rhs: &i64) -> I64Vec4 {
991 self.div(*rhs)
992 }
993 }
994
995 impl Div<&i64> for &I64Vec4 {
996 type Output = I64Vec4;
997 #[inline]
div(self, rhs: &i64) -> I64Vec4998 fn div(self, rhs: &i64) -> I64Vec4 {
999 (*self).div(*rhs)
1000 }
1001 }
1002
1003 impl Div<i64> for &I64Vec4 {
1004 type Output = I64Vec4;
1005 #[inline]
div(self, rhs: i64) -> I64Vec41006 fn div(self, rhs: i64) -> I64Vec4 {
1007 (*self).div(rhs)
1008 }
1009 }
1010
1011 impl DivAssign<i64> for I64Vec4 {
1012 #[inline]
div_assign(&mut self, rhs: i64)1013 fn div_assign(&mut self, rhs: i64) {
1014 self.x.div_assign(rhs);
1015 self.y.div_assign(rhs);
1016 self.z.div_assign(rhs);
1017 self.w.div_assign(rhs);
1018 }
1019 }
1020
1021 impl DivAssign<&i64> for I64Vec4 {
1022 #[inline]
div_assign(&mut self, rhs: &i64)1023 fn div_assign(&mut self, rhs: &i64) {
1024 self.div_assign(*rhs)
1025 }
1026 }
1027
1028 impl Div<I64Vec4> for i64 {
1029 type Output = I64Vec4;
1030 #[inline]
div(self, rhs: I64Vec4) -> I64Vec41031 fn div(self, rhs: I64Vec4) -> I64Vec4 {
1032 I64Vec4 {
1033 x: self.div(rhs.x),
1034 y: self.div(rhs.y),
1035 z: self.div(rhs.z),
1036 w: self.div(rhs.w),
1037 }
1038 }
1039 }
1040
1041 impl Div<&I64Vec4> for i64 {
1042 type Output = I64Vec4;
1043 #[inline]
div(self, rhs: &I64Vec4) -> I64Vec41044 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
1045 self.div(*rhs)
1046 }
1047 }
1048
1049 impl Div<&I64Vec4> for &i64 {
1050 type Output = I64Vec4;
1051 #[inline]
div(self, rhs: &I64Vec4) -> I64Vec41052 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
1053 (*self).div(*rhs)
1054 }
1055 }
1056
1057 impl Div<I64Vec4> for &i64 {
1058 type Output = I64Vec4;
1059 #[inline]
div(self, rhs: I64Vec4) -> I64Vec41060 fn div(self, rhs: I64Vec4) -> I64Vec4 {
1061 (*self).div(rhs)
1062 }
1063 }
1064
1065 impl Mul<I64Vec4> for I64Vec4 {
1066 type Output = Self;
1067 #[inline]
    fn mul(self, rhs: Self) -> Self {
1069 Self {
1070 x: self.x.mul(rhs.x),
1071 y: self.y.mul(rhs.y),
1072 z: self.z.mul(rhs.z),
1073 w: self.w.mul(rhs.w),
1074 }
1075 }
1076 }
1077
1078 impl Mul<&I64Vec4> for I64Vec4 {
1079 type Output = I64Vec4;
1080 #[inline]
mul(self, rhs: &I64Vec4) -> I64Vec41081 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1082 self.mul(*rhs)
1083 }
1084 }
1085
1086 impl Mul<&I64Vec4> for &I64Vec4 {
1087 type Output = I64Vec4;
1088 #[inline]
mul(self, rhs: &I64Vec4) -> I64Vec41089 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1090 (*self).mul(*rhs)
1091 }
1092 }
1093
1094 impl Mul<I64Vec4> for &I64Vec4 {
1095 type Output = I64Vec4;
1096 #[inline]
mul(self, rhs: I64Vec4) -> I64Vec41097 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1098 (*self).mul(rhs)
1099 }
1100 }
1101
1102 impl MulAssign<I64Vec4> for I64Vec4 {
1103 #[inline]
    fn mul_assign(&mut self, rhs: Self) {
1105 self.x.mul_assign(rhs.x);
1106 self.y.mul_assign(rhs.y);
1107 self.z.mul_assign(rhs.z);
1108 self.w.mul_assign(rhs.w);
1109 }
1110 }
1111
1112 impl MulAssign<&I64Vec4> for I64Vec4 {
1113 #[inline]
mul_assign(&mut self, rhs: &I64Vec4)1114 fn mul_assign(&mut self, rhs: &I64Vec4) {
1115 self.mul_assign(*rhs)
1116 }
1117 }
1118
1119 impl Mul<i64> for I64Vec4 {
1120 type Output = Self;
1121 #[inline]
mul(self, rhs: i64) -> Self1122 fn mul(self, rhs: i64) -> Self {
1123 Self {
1124 x: self.x.mul(rhs),
1125 y: self.y.mul(rhs),
1126 z: self.z.mul(rhs),
1127 w: self.w.mul(rhs),
1128 }
1129 }
1130 }
1131
1132 impl Mul<&i64> for I64Vec4 {
1133 type Output = I64Vec4;
1134 #[inline]
mul(self, rhs: &i64) -> I64Vec41135 fn mul(self, rhs: &i64) -> I64Vec4 {
1136 self.mul(*rhs)
1137 }
1138 }
1139
1140 impl Mul<&i64> for &I64Vec4 {
1141 type Output = I64Vec4;
1142 #[inline]
mul(self, rhs: &i64) -> I64Vec41143 fn mul(self, rhs: &i64) -> I64Vec4 {
1144 (*self).mul(*rhs)
1145 }
1146 }
1147
1148 impl Mul<i64> for &I64Vec4 {
1149 type Output = I64Vec4;
1150 #[inline]
mul(self, rhs: i64) -> I64Vec41151 fn mul(self, rhs: i64) -> I64Vec4 {
1152 (*self).mul(rhs)
1153 }
1154 }
1155
1156 impl MulAssign<i64> for I64Vec4 {
1157 #[inline]
mul_assign(&mut self, rhs: i64)1158 fn mul_assign(&mut self, rhs: i64) {
1159 self.x.mul_assign(rhs);
1160 self.y.mul_assign(rhs);
1161 self.z.mul_assign(rhs);
1162 self.w.mul_assign(rhs);
1163 }
1164 }
1165
1166 impl MulAssign<&i64> for I64Vec4 {
1167 #[inline]
mul_assign(&mut self, rhs: &i64)1168 fn mul_assign(&mut self, rhs: &i64) {
1169 self.mul_assign(*rhs)
1170 }
1171 }
1172
1173 impl Mul<I64Vec4> for i64 {
1174 type Output = I64Vec4;
1175 #[inline]
mul(self, rhs: I64Vec4) -> I64Vec41176 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1177 I64Vec4 {
1178 x: self.mul(rhs.x),
1179 y: self.mul(rhs.y),
1180 z: self.mul(rhs.z),
1181 w: self.mul(rhs.w),
1182 }
1183 }
1184 }
1185
1186 impl Mul<&I64Vec4> for i64 {
1187 type Output = I64Vec4;
1188 #[inline]
mul(self, rhs: &I64Vec4) -> I64Vec41189 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1190 self.mul(*rhs)
1191 }
1192 }
1193
1194 impl Mul<&I64Vec4> for &i64 {
1195 type Output = I64Vec4;
1196 #[inline]
mul(self, rhs: &I64Vec4) -> I64Vec41197 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1198 (*self).mul(*rhs)
1199 }
1200 }
1201
1202 impl Mul<I64Vec4> for &i64 {
1203 type Output = I64Vec4;
1204 #[inline]
mul(self, rhs: I64Vec4) -> I64Vec41205 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1206 (*self).mul(rhs)
1207 }
1208 }
1209
1210 impl Add<I64Vec4> for I64Vec4 {
1211 type Output = Self;
1212 #[inline]
    fn add(self, rhs: Self) -> Self {
1214 Self {
1215 x: self.x.add(rhs.x),
1216 y: self.y.add(rhs.y),
1217 z: self.z.add(rhs.z),
1218 w: self.w.add(rhs.w),
1219 }
1220 }
1221 }
1222
1223 impl Add<&I64Vec4> for I64Vec4 {
1224 type Output = I64Vec4;
1225 #[inline]
add(self, rhs: &I64Vec4) -> I64Vec41226 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1227 self.add(*rhs)
1228 }
1229 }
1230
1231 impl Add<&I64Vec4> for &I64Vec4 {
1232 type Output = I64Vec4;
1233 #[inline]
add(self, rhs: &I64Vec4) -> I64Vec41234 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1235 (*self).add(*rhs)
1236 }
1237 }
1238
1239 impl Add<I64Vec4> for &I64Vec4 {
1240 type Output = I64Vec4;
1241 #[inline]
add(self, rhs: I64Vec4) -> I64Vec41242 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1243 (*self).add(rhs)
1244 }
1245 }
1246
1247 impl AddAssign<I64Vec4> for I64Vec4 {
1248 #[inline]
    fn add_assign(&mut self, rhs: Self) {
1250 self.x.add_assign(rhs.x);
1251 self.y.add_assign(rhs.y);
1252 self.z.add_assign(rhs.z);
1253 self.w.add_assign(rhs.w);
1254 }
1255 }
1256
1257 impl AddAssign<&I64Vec4> for I64Vec4 {
1258 #[inline]
add_assign(&mut self, rhs: &I64Vec4)1259 fn add_assign(&mut self, rhs: &I64Vec4) {
1260 self.add_assign(*rhs)
1261 }
1262 }
1263
1264 impl Add<i64> for I64Vec4 {
1265 type Output = Self;
1266 #[inline]
add(self, rhs: i64) -> Self1267 fn add(self, rhs: i64) -> Self {
1268 Self {
1269 x: self.x.add(rhs),
1270 y: self.y.add(rhs),
1271 z: self.z.add(rhs),
1272 w: self.w.add(rhs),
1273 }
1274 }
1275 }
1276
1277 impl Add<&i64> for I64Vec4 {
1278 type Output = I64Vec4;
1279 #[inline]
add(self, rhs: &i64) -> I64Vec41280 fn add(self, rhs: &i64) -> I64Vec4 {
1281 self.add(*rhs)
1282 }
1283 }
1284
1285 impl Add<&i64> for &I64Vec4 {
1286 type Output = I64Vec4;
1287 #[inline]
add(self, rhs: &i64) -> I64Vec41288 fn add(self, rhs: &i64) -> I64Vec4 {
1289 (*self).add(*rhs)
1290 }
1291 }
1292
1293 impl Add<i64> for &I64Vec4 {
1294 type Output = I64Vec4;
1295 #[inline]
add(self, rhs: i64) -> I64Vec41296 fn add(self, rhs: i64) -> I64Vec4 {
1297 (*self).add(rhs)
1298 }
1299 }
1300
1301 impl AddAssign<i64> for I64Vec4 {
1302 #[inline]
add_assign(&mut self, rhs: i64)1303 fn add_assign(&mut self, rhs: i64) {
1304 self.x.add_assign(rhs);
1305 self.y.add_assign(rhs);
1306 self.z.add_assign(rhs);
1307 self.w.add_assign(rhs);
1308 }
1309 }
1310
1311 impl AddAssign<&i64> for I64Vec4 {
1312 #[inline]
add_assign(&mut self, rhs: &i64)1313 fn add_assign(&mut self, rhs: &i64) {
1314 self.add_assign(*rhs)
1315 }
1316 }
1317
1318 impl Add<I64Vec4> for i64 {
1319 type Output = I64Vec4;
1320 #[inline]
add(self, rhs: I64Vec4) -> I64Vec41321 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1322 I64Vec4 {
1323 x: self.add(rhs.x),
1324 y: self.add(rhs.y),
1325 z: self.add(rhs.z),
1326 w: self.add(rhs.w),
1327 }
1328 }
1329 }
1330
1331 impl Add<&I64Vec4> for i64 {
1332 type Output = I64Vec4;
1333 #[inline]
add(self, rhs: &I64Vec4) -> I64Vec41334 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1335 self.add(*rhs)
1336 }
1337 }
1338
1339 impl Add<&I64Vec4> for &i64 {
1340 type Output = I64Vec4;
1341 #[inline]
add(self, rhs: &I64Vec4) -> I64Vec41342 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1343 (*self).add(*rhs)
1344 }
1345 }
1346
1347 impl Add<I64Vec4> for &i64 {
1348 type Output = I64Vec4;
1349 #[inline]
add(self, rhs: I64Vec4) -> I64Vec41350 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1351 (*self).add(rhs)
1352 }
1353 }
1354
1355 impl Sub<I64Vec4> for I64Vec4 {
1356 type Output = Self;
1357 #[inline]
    fn sub(self, rhs: Self) -> Self {
1359 Self {
1360 x: self.x.sub(rhs.x),
1361 y: self.y.sub(rhs.y),
1362 z: self.z.sub(rhs.z),
1363 w: self.w.sub(rhs.w),
1364 }
1365 }
1366 }
1367
1368 impl Sub<&I64Vec4> for I64Vec4 {
1369 type Output = I64Vec4;
1370 #[inline]
sub(self, rhs: &I64Vec4) -> I64Vec41371 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1372 self.sub(*rhs)
1373 }
1374 }
1375
1376 impl Sub<&I64Vec4> for &I64Vec4 {
1377 type Output = I64Vec4;
1378 #[inline]
sub(self, rhs: &I64Vec4) -> I64Vec41379 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1380 (*self).sub(*rhs)
1381 }
1382 }
1383
1384 impl Sub<I64Vec4> for &I64Vec4 {
1385 type Output = I64Vec4;
1386 #[inline]
sub(self, rhs: I64Vec4) -> I64Vec41387 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1388 (*self).sub(rhs)
1389 }
1390 }
1391
1392 impl SubAssign<I64Vec4> for I64Vec4 {
1393 #[inline]
    fn sub_assign(&mut self, rhs: I64Vec4) {
1395 self.x.sub_assign(rhs.x);
1396 self.y.sub_assign(rhs.y);
1397 self.z.sub_assign(rhs.z);
1398 self.w.sub_assign(rhs.w);
1399 }
1400 }
1401
1402 impl SubAssign<&I64Vec4> for I64Vec4 {
1403 #[inline]
sub_assign(&mut self, rhs: &I64Vec4)1404 fn sub_assign(&mut self, rhs: &I64Vec4) {
1405 self.sub_assign(*rhs)
1406 }
1407 }
1408
1409 impl Sub<i64> for I64Vec4 {
1410 type Output = Self;
1411 #[inline]
sub(self, rhs: i64) -> Self1412 fn sub(self, rhs: i64) -> Self {
1413 Self {
1414 x: self.x.sub(rhs),
1415 y: self.y.sub(rhs),
1416 z: self.z.sub(rhs),
1417 w: self.w.sub(rhs),
1418 }
1419 }
1420 }
1421
1422 impl Sub<&i64> for I64Vec4 {
1423 type Output = I64Vec4;
1424 #[inline]
sub(self, rhs: &i64) -> I64Vec41425 fn sub(self, rhs: &i64) -> I64Vec4 {
1426 self.sub(*rhs)
1427 }
1428 }
1429
1430 impl Sub<&i64> for &I64Vec4 {
1431 type Output = I64Vec4;
1432 #[inline]
sub(self, rhs: &i64) -> I64Vec41433 fn sub(self, rhs: &i64) -> I64Vec4 {
1434 (*self).sub(*rhs)
1435 }
1436 }
1437
1438 impl Sub<i64> for &I64Vec4 {
1439 type Output = I64Vec4;
1440 #[inline]
sub(self, rhs: i64) -> I64Vec41441 fn sub(self, rhs: i64) -> I64Vec4 {
1442 (*self).sub(rhs)
1443 }
1444 }
1445
1446 impl SubAssign<i64> for I64Vec4 {
1447 #[inline]
sub_assign(&mut self, rhs: i64)1448 fn sub_assign(&mut self, rhs: i64) {
1449 self.x.sub_assign(rhs);
1450 self.y.sub_assign(rhs);
1451 self.z.sub_assign(rhs);
1452 self.w.sub_assign(rhs);
1453 }
1454 }
1455
1456 impl SubAssign<&i64> for I64Vec4 {
1457 #[inline]
sub_assign(&mut self, rhs: &i64)1458 fn sub_assign(&mut self, rhs: &i64) {
1459 self.sub_assign(*rhs)
1460 }
1461 }
1462
1463 impl Sub<I64Vec4> for i64 {
1464 type Output = I64Vec4;
1465 #[inline]
sub(self, rhs: I64Vec4) -> I64Vec41466 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1467 I64Vec4 {
1468 x: self.sub(rhs.x),
1469 y: self.sub(rhs.y),
1470 z: self.sub(rhs.z),
1471 w: self.sub(rhs.w),
1472 }
1473 }
1474 }
1475
1476 impl Sub<&I64Vec4> for i64 {
1477 type Output = I64Vec4;
1478 #[inline]
sub(self, rhs: &I64Vec4) -> I64Vec41479 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1480 self.sub(*rhs)
1481 }
1482 }
1483
1484 impl Sub<&I64Vec4> for &i64 {
1485 type Output = I64Vec4;
1486 #[inline]
sub(self, rhs: &I64Vec4) -> I64Vec41487 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1488 (*self).sub(*rhs)
1489 }
1490 }
1491
1492 impl Sub<I64Vec4> for &i64 {
1493 type Output = I64Vec4;
1494 #[inline]
sub(self, rhs: I64Vec4) -> I64Vec41495 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1496 (*self).sub(rhs)
1497 }
1498 }
1499
1500 impl Rem<I64Vec4> for I64Vec4 {
1501 type Output = Self;
1502 #[inline]
    fn rem(self, rhs: Self) -> Self {
1504 Self {
1505 x: self.x.rem(rhs.x),
1506 y: self.y.rem(rhs.y),
1507 z: self.z.rem(rhs.z),
1508 w: self.w.rem(rhs.w),
1509 }
1510 }
1511 }
1512
1513 impl Rem<&I64Vec4> for I64Vec4 {
1514 type Output = I64Vec4;
1515 #[inline]
rem(self, rhs: &I64Vec4) -> I64Vec41516 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1517 self.rem(*rhs)
1518 }
1519 }
1520
1521 impl Rem<&I64Vec4> for &I64Vec4 {
1522 type Output = I64Vec4;
1523 #[inline]
rem(self, rhs: &I64Vec4) -> I64Vec41524 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1525 (*self).rem(*rhs)
1526 }
1527 }
1528
1529 impl Rem<I64Vec4> for &I64Vec4 {
1530 type Output = I64Vec4;
1531 #[inline]
rem(self, rhs: I64Vec4) -> I64Vec41532 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1533 (*self).rem(rhs)
1534 }
1535 }
1536
1537 impl RemAssign<I64Vec4> for I64Vec4 {
1538 #[inline]
    fn rem_assign(&mut self, rhs: Self) {
1540 self.x.rem_assign(rhs.x);
1541 self.y.rem_assign(rhs.y);
1542 self.z.rem_assign(rhs.z);
1543 self.w.rem_assign(rhs.w);
1544 }
1545 }
1546
1547 impl RemAssign<&I64Vec4> for I64Vec4 {
1548 #[inline]
rem_assign(&mut self, rhs: &I64Vec4)1549 fn rem_assign(&mut self, rhs: &I64Vec4) {
1550 self.rem_assign(*rhs)
1551 }
1552 }
1553
1554 impl Rem<i64> for I64Vec4 {
1555 type Output = Self;
1556 #[inline]
rem(self, rhs: i64) -> Self1557 fn rem(self, rhs: i64) -> Self {
1558 Self {
1559 x: self.x.rem(rhs),
1560 y: self.y.rem(rhs),
1561 z: self.z.rem(rhs),
1562 w: self.w.rem(rhs),
1563 }
1564 }
1565 }
1566
1567 impl Rem<&i64> for I64Vec4 {
1568 type Output = I64Vec4;
1569 #[inline]
rem(self, rhs: &i64) -> I64Vec41570 fn rem(self, rhs: &i64) -> I64Vec4 {
1571 self.rem(*rhs)
1572 }
1573 }
1574
1575 impl Rem<&i64> for &I64Vec4 {
1576 type Output = I64Vec4;
1577 #[inline]
rem(self, rhs: &i64) -> I64Vec41578 fn rem(self, rhs: &i64) -> I64Vec4 {
1579 (*self).rem(*rhs)
1580 }
1581 }
1582
1583 impl Rem<i64> for &I64Vec4 {
1584 type Output = I64Vec4;
1585 #[inline]
rem(self, rhs: i64) -> I64Vec41586 fn rem(self, rhs: i64) -> I64Vec4 {
1587 (*self).rem(rhs)
1588 }
1589 }
1590
1591 impl RemAssign<i64> for I64Vec4 {
1592 #[inline]
rem_assign(&mut self, rhs: i64)1593 fn rem_assign(&mut self, rhs: i64) {
1594 self.x.rem_assign(rhs);
1595 self.y.rem_assign(rhs);
1596 self.z.rem_assign(rhs);
1597 self.w.rem_assign(rhs);
1598 }
1599 }
1600
1601 impl RemAssign<&i64> for I64Vec4 {
1602 #[inline]
rem_assign(&mut self, rhs: &i64)1603 fn rem_assign(&mut self, rhs: &i64) {
1604 self.rem_assign(*rhs)
1605 }
1606 }
1607
1608 impl Rem<I64Vec4> for i64 {
1609 type Output = I64Vec4;
1610 #[inline]
rem(self, rhs: I64Vec4) -> I64Vec41611 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1612 I64Vec4 {
1613 x: self.rem(rhs.x),
1614 y: self.rem(rhs.y),
1615 z: self.rem(rhs.z),
1616 w: self.rem(rhs.w),
1617 }
1618 }
1619 }
1620
1621 impl Rem<&I64Vec4> for i64 {
1622 type Output = I64Vec4;
1623 #[inline]
rem(self, rhs: &I64Vec4) -> I64Vec41624 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1625 self.rem(*rhs)
1626 }
1627 }
1628
1629 impl Rem<&I64Vec4> for &i64 {
1630 type Output = I64Vec4;
1631 #[inline]
rem(self, rhs: &I64Vec4) -> I64Vec41632 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1633 (*self).rem(*rhs)
1634 }
1635 }
1636
1637 impl Rem<I64Vec4> for &i64 {
1638 type Output = I64Vec4;
1639 #[inline]
rem(self, rhs: I64Vec4) -> I64Vec41640 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1641 (*self).rem(rhs)
1642 }
1643 }
1644
1645 #[cfg(not(target_arch = "spirv"))]
1646 impl AsRef<[i64; 4]> for I64Vec4 {
1647 #[inline]
    fn as_ref(&self) -> &[i64; 4] {
1649 unsafe { &*(self as *const I64Vec4 as *const [i64; 4]) }
1650 }
1651 }
1652
1653 #[cfg(not(target_arch = "spirv"))]
1654 impl AsMut<[i64; 4]> for I64Vec4 {
1655 #[inline]
    fn as_mut(&mut self) -> &mut [i64; 4] {
1657 unsafe { &mut *(self as *mut I64Vec4 as *mut [i64; 4]) }
1658 }
1659 }
1660
1661 impl Sum for I64Vec4 {
1662 #[inline]
    fn sum<I>(iter: I) -> Self
1664 where
1665 I: Iterator<Item = Self>,
1666 {
1667 iter.fold(Self::ZERO, Self::add)
1668 }
1669 }
1670
1671 impl<'a> Sum<&'a Self> for I64Vec4 {
1672 #[inline]
    fn sum<I>(iter: I) -> Self
1674 where
1675 I: Iterator<Item = &'a Self>,
1676 {
1677 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1678 }
1679 }
1680
1681 impl Product for I64Vec4 {
1682 #[inline]
    fn product<I>(iter: I) -> Self
1684 where
1685 I: Iterator<Item = Self>,
1686 {
1687 iter.fold(Self::ONE, Self::mul)
1688 }
1689 }
1690
1691 impl<'a> Product<&'a Self> for I64Vec4 {
1692 #[inline]
    fn product<I>(iter: I) -> Self
1694 where
1695 I: Iterator<Item = &'a Self>,
1696 {
1697 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1698 }
1699 }
1700
1701 impl Neg for I64Vec4 {
1702 type Output = Self;
1703 #[inline]
    fn neg(self) -> Self {
1705 Self {
1706 x: self.x.neg(),
1707 y: self.y.neg(),
1708 z: self.z.neg(),
1709 w: self.w.neg(),
1710 }
1711 }
1712 }
1713
1714 impl Neg for &I64Vec4 {
1715 type Output = I64Vec4;
1716 #[inline]
    fn neg(self) -> I64Vec4 {
1718 (*self).neg()
1719 }
1720 }
1721
1722 impl Not for I64Vec4 {
1723 type Output = Self;
1724 #[inline]
    fn not(self) -> Self::Output {
1726 Self {
1727 x: self.x.not(),
1728 y: self.y.not(),
1729 z: self.z.not(),
1730 w: self.w.not(),
1731 }
1732 }
1733 }
1734
1735 impl BitAnd for I64Vec4 {
1736 type Output = Self;
1737 #[inline]
bitand(self, rhs: Self) -> Self::Output1738 fn bitand(self, rhs: Self) -> Self::Output {
1739 Self {
1740 x: self.x.bitand(rhs.x),
1741 y: self.y.bitand(rhs.y),
1742 z: self.z.bitand(rhs.z),
1743 w: self.w.bitand(rhs.w),
1744 }
1745 }
1746 }
1747
1748 impl BitOr for I64Vec4 {
1749 type Output = Self;
1750 #[inline]
bitor(self, rhs: Self) -> Self::Output1751 fn bitor(self, rhs: Self) -> Self::Output {
1752 Self {
1753 x: self.x.bitor(rhs.x),
1754 y: self.y.bitor(rhs.y),
1755 z: self.z.bitor(rhs.z),
1756 w: self.w.bitor(rhs.w),
1757 }
1758 }
1759 }
1760
1761 impl BitXor for I64Vec4 {
1762 type Output = Self;
1763 #[inline]
bitxor(self, rhs: Self) -> Self::Output1764 fn bitxor(self, rhs: Self) -> Self::Output {
1765 Self {
1766 x: self.x.bitxor(rhs.x),
1767 y: self.y.bitxor(rhs.y),
1768 z: self.z.bitxor(rhs.z),
1769 w: self.w.bitxor(rhs.w),
1770 }
1771 }
1772 }
1773
1774 impl BitAnd<i64> for I64Vec4 {
1775 type Output = Self;
1776 #[inline]
bitand(self, rhs: i64) -> Self::Output1777 fn bitand(self, rhs: i64) -> Self::Output {
1778 Self {
1779 x: self.x.bitand(rhs),
1780 y: self.y.bitand(rhs),
1781 z: self.z.bitand(rhs),
1782 w: self.w.bitand(rhs),
1783 }
1784 }
1785 }
1786
1787 impl BitOr<i64> for I64Vec4 {
1788 type Output = Self;
1789 #[inline]
bitor(self, rhs: i64) -> Self::Output1790 fn bitor(self, rhs: i64) -> Self::Output {
1791 Self {
1792 x: self.x.bitor(rhs),
1793 y: self.y.bitor(rhs),
1794 z: self.z.bitor(rhs),
1795 w: self.w.bitor(rhs),
1796 }
1797 }
1798 }
1799
1800 impl BitXor<i64> for I64Vec4 {
1801 type Output = Self;
1802 #[inline]
bitxor(self, rhs: i64) -> Self::Output1803 fn bitxor(self, rhs: i64) -> Self::Output {
1804 Self {
1805 x: self.x.bitxor(rhs),
1806 y: self.y.bitxor(rhs),
1807 z: self.z.bitxor(rhs),
1808 w: self.w.bitxor(rhs),
1809 }
1810 }
1811 }
1812
1813 impl Shl<i8> for I64Vec4 {
1814 type Output = Self;
1815 #[inline]
shl(self, rhs: i8) -> Self::Output1816 fn shl(self, rhs: i8) -> Self::Output {
1817 Self {
1818 x: self.x.shl(rhs),
1819 y: self.y.shl(rhs),
1820 z: self.z.shl(rhs),
1821 w: self.w.shl(rhs),
1822 }
1823 }
1824 }
1825
1826 impl Shr<i8> for I64Vec4 {
1827 type Output = Self;
1828 #[inline]
shr(self, rhs: i8) -> Self::Output1829 fn shr(self, rhs: i8) -> Self::Output {
1830 Self {
1831 x: self.x.shr(rhs),
1832 y: self.y.shr(rhs),
1833 z: self.z.shr(rhs),
1834 w: self.w.shr(rhs),
1835 }
1836 }
1837 }
1838
1839 impl Shl<i16> for I64Vec4 {
1840 type Output = Self;
1841 #[inline]
shl(self, rhs: i16) -> Self::Output1842 fn shl(self, rhs: i16) -> Self::Output {
1843 Self {
1844 x: self.x.shl(rhs),
1845 y: self.y.shl(rhs),
1846 z: self.z.shl(rhs),
1847 w: self.w.shl(rhs),
1848 }
1849 }
1850 }
1851
1852 impl Shr<i16> for I64Vec4 {
1853 type Output = Self;
1854 #[inline]
shr(self, rhs: i16) -> Self::Output1855 fn shr(self, rhs: i16) -> Self::Output {
1856 Self {
1857 x: self.x.shr(rhs),
1858 y: self.y.shr(rhs),
1859 z: self.z.shr(rhs),
1860 w: self.w.shr(rhs),
1861 }
1862 }
1863 }
1864
1865 impl Shl<i32> for I64Vec4 {
1866 type Output = Self;
1867 #[inline]
shl(self, rhs: i32) -> Self::Output1868 fn shl(self, rhs: i32) -> Self::Output {
1869 Self {
1870 x: self.x.shl(rhs),
1871 y: self.y.shl(rhs),
1872 z: self.z.shl(rhs),
1873 w: self.w.shl(rhs),
1874 }
1875 }
1876 }
1877
1878 impl Shr<i32> for I64Vec4 {
1879 type Output = Self;
1880 #[inline]
shr(self, rhs: i32) -> Self::Output1881 fn shr(self, rhs: i32) -> Self::Output {
1882 Self {
1883 x: self.x.shr(rhs),
1884 y: self.y.shr(rhs),
1885 z: self.z.shr(rhs),
1886 w: self.w.shr(rhs),
1887 }
1888 }
1889 }
1890
1891 impl Shl<i64> for I64Vec4 {
1892 type Output = Self;
1893 #[inline]
shl(self, rhs: i64) -> Self::Output1894 fn shl(self, rhs: i64) -> Self::Output {
1895 Self {
1896 x: self.x.shl(rhs),
1897 y: self.y.shl(rhs),
1898 z: self.z.shl(rhs),
1899 w: self.w.shl(rhs),
1900 }
1901 }
1902 }
1903
1904 impl Shr<i64> for I64Vec4 {
1905 type Output = Self;
1906 #[inline]
shr(self, rhs: i64) -> Self::Output1907 fn shr(self, rhs: i64) -> Self::Output {
1908 Self {
1909 x: self.x.shr(rhs),
1910 y: self.y.shr(rhs),
1911 z: self.z.shr(rhs),
1912 w: self.w.shr(rhs),
1913 }
1914 }
1915 }
1916
1917 impl Shl<u8> for I64Vec4 {
1918 type Output = Self;
1919 #[inline]
shl(self, rhs: u8) -> Self::Output1920 fn shl(self, rhs: u8) -> Self::Output {
1921 Self {
1922 x: self.x.shl(rhs),
1923 y: self.y.shl(rhs),
1924 z: self.z.shl(rhs),
1925 w: self.w.shl(rhs),
1926 }
1927 }
1928 }
1929
1930 impl Shr<u8> for I64Vec4 {
1931 type Output = Self;
1932 #[inline]
shr(self, rhs: u8) -> Self::Output1933 fn shr(self, rhs: u8) -> Self::Output {
1934 Self {
1935 x: self.x.shr(rhs),
1936 y: self.y.shr(rhs),
1937 z: self.z.shr(rhs),
1938 w: self.w.shr(rhs),
1939 }
1940 }
1941 }
1942
1943 impl Shl<u16> for I64Vec4 {
1944 type Output = Self;
1945 #[inline]
shl(self, rhs: u16) -> Self::Output1946 fn shl(self, rhs: u16) -> Self::Output {
1947 Self {
1948 x: self.x.shl(rhs),
1949 y: self.y.shl(rhs),
1950 z: self.z.shl(rhs),
1951 w: self.w.shl(rhs),
1952 }
1953 }
1954 }
1955
1956 impl Shr<u16> for I64Vec4 {
1957 type Output = Self;
1958 #[inline]
shr(self, rhs: u16) -> Self::Output1959 fn shr(self, rhs: u16) -> Self::Output {
1960 Self {
1961 x: self.x.shr(rhs),
1962 y: self.y.shr(rhs),
1963 z: self.z.shr(rhs),
1964 w: self.w.shr(rhs),
1965 }
1966 }
1967 }
1968
1969 impl Shl<u32> for I64Vec4 {
1970 type Output = Self;
1971 #[inline]
shl(self, rhs: u32) -> Self::Output1972 fn shl(self, rhs: u32) -> Self::Output {
1973 Self {
1974 x: self.x.shl(rhs),
1975 y: self.y.shl(rhs),
1976 z: self.z.shl(rhs),
1977 w: self.w.shl(rhs),
1978 }
1979 }
1980 }
1981
1982 impl Shr<u32> for I64Vec4 {
1983 type Output = Self;
1984 #[inline]
shr(self, rhs: u32) -> Self::Output1985 fn shr(self, rhs: u32) -> Self::Output {
1986 Self {
1987 x: self.x.shr(rhs),
1988 y: self.y.shr(rhs),
1989 z: self.z.shr(rhs),
1990 w: self.w.shr(rhs),
1991 }
1992 }
1993 }
1994
1995 impl Shl<u64> for I64Vec4 {
1996 type Output = Self;
1997 #[inline]
shl(self, rhs: u64) -> Self::Output1998 fn shl(self, rhs: u64) -> Self::Output {
1999 Self {
2000 x: self.x.shl(rhs),
2001 y: self.y.shl(rhs),
2002 z: self.z.shl(rhs),
2003 w: self.w.shl(rhs),
2004 }
2005 }
2006 }
2007
2008 impl Shr<u64> for I64Vec4 {
2009 type Output = Self;
2010 #[inline]
shr(self, rhs: u64) -> Self::Output2011 fn shr(self, rhs: u64) -> Self::Output {
2012 Self {
2013 x: self.x.shr(rhs),
2014 y: self.y.shr(rhs),
2015 z: self.z.shr(rhs),
2016 w: self.w.shr(rhs),
2017 }
2018 }
2019 }
2020
2021 impl Shl<crate::IVec4> for I64Vec4 {
2022 type Output = Self;
2023 #[inline]
shl(self, rhs: crate::IVec4) -> Self::Output2024 fn shl(self, rhs: crate::IVec4) -> Self::Output {
2025 Self {
2026 x: self.x.shl(rhs.x),
2027 y: self.y.shl(rhs.y),
2028 z: self.z.shl(rhs.z),
2029 w: self.w.shl(rhs.w),
2030 }
2031 }
2032 }
2033
2034 impl Shr<crate::IVec4> for I64Vec4 {
2035 type Output = Self;
2036 #[inline]
shr(self, rhs: crate::IVec4) -> Self::Output2037 fn shr(self, rhs: crate::IVec4) -> Self::Output {
2038 Self {
2039 x: self.x.shr(rhs.x),
2040 y: self.y.shr(rhs.y),
2041 z: self.z.shr(rhs.z),
2042 w: self.w.shr(rhs.w),
2043 }
2044 }
2045 }
2046
2047 impl Shl<crate::UVec4> for I64Vec4 {
2048 type Output = Self;
2049 #[inline]
shl(self, rhs: crate::UVec4) -> Self::Output2050 fn shl(self, rhs: crate::UVec4) -> Self::Output {
2051 Self {
2052 x: self.x.shl(rhs.x),
2053 y: self.y.shl(rhs.y),
2054 z: self.z.shl(rhs.z),
2055 w: self.w.shl(rhs.w),
2056 }
2057 }
2058 }
2059
2060 impl Shr<crate::UVec4> for I64Vec4 {
2061 type Output = Self;
2062 #[inline]
shr(self, rhs: crate::UVec4) -> Self::Output2063 fn shr(self, rhs: crate::UVec4) -> Self::Output {
2064 Self {
2065 x: self.x.shr(rhs.x),
2066 y: self.y.shr(rhs.y),
2067 z: self.z.shr(rhs.z),
2068 w: self.w.shr(rhs.w),
2069 }
2070 }
2071 }
2072
2073 impl Index<usize> for I64Vec4 {
2074 type Output = i64;
2075 #[inline]
    fn index(&self, index: usize) -> &Self::Output {
2077 match index {
2078 0 => &self.x,
2079 1 => &self.y,
2080 2 => &self.z,
2081 3 => &self.w,
2082 _ => panic!("index out of bounds"),
2083 }
2084 }
2085 }
2086
2087 impl IndexMut<usize> for I64Vec4 {
2088 #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
2090 match index {
2091 0 => &mut self.x,
2092 1 => &mut self.y,
2093 2 => &mut self.z,
2094 3 => &mut self.w,
2095 _ => panic!("index out of bounds"),
2096 }
2097 }
2098 }
2099
2100 impl fmt::Display for I64Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2102 write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
2103 }
2104 }
2105
2106 impl fmt::Debug for I64Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2108 fmt.debug_tuple(stringify!(I64Vec4))
2109 .field(&self.x)
2110 .field(&self.y)
2111 .field(&self.z)
2112 .field(&self.w)
2113 .finish()
2114 }
2115 }
2116
2117 impl From<[i64; 4]> for I64Vec4 {
2118 #[inline]
    fn from(a: [i64; 4]) -> Self {
2120 Self::new(a[0], a[1], a[2], a[3])
2121 }
2122 }
2123
2124 impl From<I64Vec4> for [i64; 4] {
2125 #[inline]
    fn from(v: I64Vec4) -> Self {
2127 [v.x, v.y, v.z, v.w]
2128 }
2129 }
2130
2131 impl From<(i64, i64, i64, i64)> for I64Vec4 {
2132 #[inline]
    fn from(t: (i64, i64, i64, i64)) -> Self {
2134 Self::new(t.0, t.1, t.2, t.3)
2135 }
2136 }
2137
2138 impl From<I64Vec4> for (i64, i64, i64, i64) {
2139 #[inline]
    fn from(v: I64Vec4) -> Self {
2141 (v.x, v.y, v.z, v.w)
2142 }
2143 }
2144
2145 impl From<(I64Vec3, i64)> for I64Vec4 {
2146 #[inline]
    fn from((v, w): (I64Vec3, i64)) -> Self {
2148 Self::new(v.x, v.y, v.z, w)
2149 }
2150 }
2151
2152 impl From<(i64, I64Vec3)> for I64Vec4 {
2153 #[inline]
    fn from((x, v): (i64, I64Vec3)) -> Self {
2155 Self::new(x, v.x, v.y, v.z)
2156 }
2157 }
2158
2159 impl From<(I64Vec2, i64, i64)> for I64Vec4 {
2160 #[inline]
    fn from((v, z, w): (I64Vec2, i64, i64)) -> Self {
2162 Self::new(v.x, v.y, z, w)
2163 }
2164 }
2165
2166 impl From<(I64Vec2, I64Vec2)> for I64Vec4 {
2167 #[inline]
    fn from((v, u): (I64Vec2, I64Vec2)) -> Self {
2169 Self::new(v.x, v.y, u.x, u.y)
2170 }
2171 }
2172
2173 impl From<I8Vec4> for I64Vec4 {
2174 #[inline]
    fn from(v: I8Vec4) -> Self {
2176 Self::new(
2177 i64::from(v.x),
2178 i64::from(v.y),
2179 i64::from(v.z),
2180 i64::from(v.w),
2181 )
2182 }
2183 }
2184
2185 impl From<U8Vec4> for I64Vec4 {
2186 #[inline]
    fn from(v: U8Vec4) -> Self {
2188 Self::new(
2189 i64::from(v.x),
2190 i64::from(v.y),
2191 i64::from(v.z),
2192 i64::from(v.w),
2193 )
2194 }
2195 }
2196
2197 impl From<I16Vec4> for I64Vec4 {
2198 #[inline]
    fn from(v: I16Vec4) -> Self {
2200 Self::new(
2201 i64::from(v.x),
2202 i64::from(v.y),
2203 i64::from(v.z),
2204 i64::from(v.w),
2205 )
2206 }
2207 }
2208
2209 impl From<U16Vec4> for I64Vec4 {
2210 #[inline]
    fn from(v: U16Vec4) -> Self {
2212 Self::new(
2213 i64::from(v.x),
2214 i64::from(v.y),
2215 i64::from(v.z),
2216 i64::from(v.w),
2217 )
2218 }
2219 }
2220
2221 impl From<IVec4> for I64Vec4 {
2222 #[inline]
    fn from(v: IVec4) -> Self {
2224 Self::new(
2225 i64::from(v.x),
2226 i64::from(v.y),
2227 i64::from(v.z),
2228 i64::from(v.w),
2229 )
2230 }
2231 }
2232
2233 impl From<UVec4> for I64Vec4 {
2234 #[inline]
    fn from(v: UVec4) -> Self {
2236 Self::new(
2237 i64::from(v.x),
2238 i64::from(v.y),
2239 i64::from(v.z),
2240 i64::from(v.w),
2241 )
2242 }
2243 }
2244
2245 impl TryFrom<U64Vec4> for I64Vec4 {
2246 type Error = core::num::TryFromIntError;
2247
2248 #[inline]
    fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
2250 Ok(Self::new(
2251 i64::try_from(v.x)?,
2252 i64::try_from(v.y)?,
2253 i64::try_from(v.z)?,
2254 i64::try_from(v.w)?,
2255 ))
2256 }
2257 }
2258
2259 impl From<BVec4> for I64Vec4 {
2260 #[inline]
    fn from(v: BVec4) -> Self {
2262 Self::new(
2263 i64::from(v.x),
2264 i64::from(v.y),
2265 i64::from(v.z),
2266 i64::from(v.w),
2267 )
2268 }
2269 }
2270
2271 #[cfg(not(feature = "scalar-math"))]
2272 impl From<BVec4A> for I64Vec4 {
2273 #[inline]
    fn from(v: BVec4A) -> Self {
2275 let bool_array: [bool; 4] = v.into();
2276 Self::new(
2277 i64::from(bool_array[0]),
2278 i64::from(bool_array[1]),
2279 i64::from(bool_array[2]),
2280 i64::from(bool_array[3]),
2281 )
2282 }
2283 }
2284