// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

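// Template for XNNPACK's microkernel generator: '$'-prefixed lines are
// generator directives, and ${...} expressions are substituted once per
// (OP, BATCH_TILE) combination when the per-variant kernels are generated.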
$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMN"
$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


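// Map the OP template parameter to the SSE intrinsic that implements it.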
$_MM_OP_PS = {
$  "ADD": lambda x, y: "_mm_add_ps(%s, %s)" % (x, y),
$  "DIV": lambda x, y: "_mm_div_ps(%s, %s)" % (x, y),
$  "MAX": lambda x, y: "_mm_max_ps(%s, %s)" % (x, y),
$  "MIN": lambda x, y: "_mm_min_ps(%s, %s)" % (x, y),
$  "MUL": lambda x, y: "_mm_mul_ps(%s, %s)" % (x, y),
$  "SUB": lambda x, y: "_mm_sub_ps(%s, %s)" % (x, y),
$}[OP]
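// Computes y := clamp(a OP b, params->sse.min, params->sse.max) elementwise.
// n is given in bytes and must be a non-zero multiple of sizeof(float).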
void xnn_f32_v${OP.lower()}_ukernel__sse_x${BATCH_TILE}(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const __m128 vy_min = _mm_load_ps(params->sse.min);
  const __m128 vy_max = _mm_load_ps(params->sse.max);

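  // Main loop: process BATCH_TILE elements (BATCH_TILE/4 SSE registers) per iteration.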
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m128 va${ABC[0:4]} = _mm_loadu_ps(a);
    $for N in range(4, BATCH_TILE, 4):
      const __m128 va${ABC[N:N+4]} = _mm_loadu_ps(a + ${N});
    a += ${BATCH_TILE};

    const __m128 vb${ABC[0:4]} = _mm_loadu_ps(b);
    $for N in range(4, BATCH_TILE, 4):
      const __m128 vb${ABC[N:N+4]} = _mm_loadu_ps(b + ${N});
    b += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 4):
      __m128 vy${ABC[N:N+4]} = ${_MM_OP_PS("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};

    $for N in range(0, BATCH_TILE, 4):
      vy${ABC[N:N+4]} = _mm_max_ps(vy${ABC[N:N+4]}, vy_min);

    $for N in range(0, BATCH_TILE, 4):
      vy${ABC[N:N+4]} = _mm_min_ps(vy${ABC[N:N+4]}, vy_max);

    _mm_storeu_ps(y, vy${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
    // Remainder loop: process leftover groups of 4 elements.
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const __m128 va0123 = _mm_loadu_ps(a);
      a += 4;

      const __m128 vb0123 = _mm_loadu_ps(b);
      b += 4;

      __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
      vy0123 = _mm_max_ps(vy0123, vy_min);
      vy0123 = _mm_min_ps(vy0123, vy_max);
      _mm_storeu_ps(y, vy0123);
      y += 4;
    }
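  // Tail: 1-3 leftover elements. Compute a full 4-lane result, then store only
  // the low two lanes and/or the lowest lane as needed.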
  if XNN_UNLIKELY(n != 0) {
    const __m128 va0123 = _mm_loadu_ps(a);
    const __m128 vb0123 = _mm_loadu_ps(b);

    __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
    vy0123 = _mm_max_ps(vy0123, vy_min);
    vy0123 = _mm_min_ps(vy0123, vy_max);
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy0123);
      vy0123 = _mm_movehl_ps(vy0123, vy0123);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy0123);
    }
  }
}