// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["RNDNE", "RNDZ", "RNDU", "RNDD"]
#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


$_MM_FROUND_TO_FLAG = {
$  "RNDNE": "_MM_FROUND_TO_NEAREST_INT",
$  "RNDZ": "_MM_FROUND_TO_ZERO",
$  "RNDU": "_MM_FROUND_TO_POS_INF",
$  "RNDD": "_MM_FROUND_TO_NEG_INF",
$}[OP]
void xnn_f32_v${OP.lower()}_ukernel__sse41_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

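  // Main loop: round ${BATCH_TILE} elements per iteration.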
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
    $for N in range(4, BATCH_TILE, 4):
      const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 4):
      const __m128 vy${ABC[N:N+4]} = _mm_round_ps(vx${ABC[N:N+4]}, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    _mm_storeu_ps(y, vy${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
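    // Round the remaining full blocks of 4 elements.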
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const __m128 vx = _mm_loadu_ps(x);
      x += 4;

      const __m128 vy = _mm_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

      _mm_storeu_ps(y, vy);
      y += 4;
    }
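  // Round and store the final 1 to 3 elements.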
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);
    __m128 vy = _mm_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}