/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <emmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/mem_sse2.h"

void vpx_comp_avg_pred_sse2(uint8_t *comp_pred, const uint8_t *pred, int width,
                            int height, const uint8_t *ref, int ref_stride) {
  /* comp_pred and pred must be 16 byte aligned. */
  assert(((intptr_t)comp_pred & 0xf) == 0);
  assert(((intptr_t)pred & 0xf) == 0);
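  // _mm_avg_epu8 computes the rounded average (a + b + 1) >> 1 per byte,
  // matching the ROUND_POWER_OF_TWO(pred + ref, 1) used by the C reference
  // implementation, vpx_comp_avg_pred_c.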
  if (width > 8) {
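    // Only widths of 16, 32 and 64 reach this path, so each row splits into
    // whole 16-byte chunks. ref is loaded unaligned because ref_stride
    // carries no alignment guarantee.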
    int x, y;
    for (y = 0; y < height; ++y) {
      for (x = 0; x < width; x += 16) {
        const __m128i p = _mm_load_si128((const __m128i *)(pred + x));
        const __m128i r = _mm_loadu_si128((const __m128i *)(ref + x));
        const __m128i avg = _mm_avg_epu8(p, r);
        _mm_store_si128((__m128i *)(comp_pred + x), avg);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else {  // width must be 4 or 8.
    int i;
    // Process 16 elements at a time. comp_pred and pred have width == stride
    // and therefore live in contiguous memory. 4*4, 4*8, 8*4, 8*8, and 8*16
    // are all divisible by 16, so just ref needs to be massaged when loading.
    for (i = 0; i < width * height; i += 16) {
      const __m128i p = _mm_load_si128((const __m128i *)pred);
      __m128i r;
      __m128i avg;
      if (width == ref_stride) {
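        // ref rows are also contiguous here, so 16 bytes can be read straight
        // across row boundaries with a single unaligned load.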
        r = _mm_loadu_si128((const __m128i *)ref);
        ref += 16;
      } else if (width == 4) {
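        // Gather four 4-byte rows into one vector. _mm_set_epi32 takes its
        // arguments high-to-low, so the first row lands in the low lane.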
        r = _mm_set_epi32(loadu_uint32(ref + 3 * ref_stride),
                          loadu_uint32(ref + 2 * ref_stride),
                          loadu_uint32(ref + ref_stride), loadu_uint32(ref));

        ref += 4 * ref_stride;
      } else {
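        // Combine two 8-byte rows: the first into the low half of the vector
        // (movq), the second into the high half (movhps).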
        const __m128i r_0 = _mm_loadl_epi64((const __m128i *)ref);
        assert(width == 8);
        r = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(r_0),
                                          (const __m64 *)(ref + ref_stride)));

        ref += 2 * ref_stride;
      }
      avg = _mm_avg_epu8(p, r);
      _mm_store_si128((__m128i *)comp_pred, avg);

      pred += 16;
      comp_pred += 16;
    }
  }
}
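
/* For reference, the SSE2 code above is bit-exact with the scalar version.
 * The sketch below paraphrases vpx_comp_avg_pred_c (it is not the exact
 * source, which lives elsewhere in vpx_dsp):
 *
 *   int i, j;
 *   for (i = 0; i < height; ++i) {
 *     for (j = 0; j < width; ++j)
 *       comp_pred[j] = (pred[j] + ref[j] + 1) >> 1;
 *     comp_pred += width;
 *     pred += width;
 *     ref += ref_stride;
 *   }
 */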