/*
 * SSE optimized hash slide
 *
 * Copyright (C) 2017 Intel Corporation
 * Authors:
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
#include "../../zbuild.h"
#include "../../deflate.h"

#include <immintrin.h>
#include <assert.h>

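/* Subtract wsize from every 16-bit entry of the given hash tables, saturating
 * at zero so that positions which slide out of the window become NIL. Both
 * tables are walked back-to-front, 16 entries (two 128-bit vectors) per
 * iteration, so the entry counts must be non-zero multiples of 16.
 *
 * A scalar sketch of the per-entry operation, for reference only (wsize here
 * stands for the scalar window size that the vector argument broadcasts):
 *
 *   for (uint32_t i = 0; i < entries; i++)
 *       table[i] = (table[i] >= wsize) ? (Pos)(table[i] - wsize) : 0;
 */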
static inline void slide_hash_chain(Pos *table0, Pos *table1, uint32_t entries0,
                                    uint32_t entries1, const __m128i wsize) {
    uint32_t entries;
    Pos *table;
    __m128i value0, value1, result0, result1;

    int on_chain = 0;

next_chain:
    /* First pass handles table0/entries0, second pass table1/entries1 */
    table = (on_chain) ? table1 : table0;
    entries = (on_chain) ? entries1 : entries0;

    /* Point at the last block of 16 entries and walk toward the front */
    table += entries;
    table -= 16;
    /* ZALLOC allocates these tables unless the user chose a custom allocator;
     * our allocator returns 64-byte aligned memory, so the aligned loads and
     * stores below are safe. */
    do {
        value0 = _mm_load_si128((__m128i *)table);
        value1 = _mm_load_si128((__m128i *)(table + 8));
        /* Saturating unsigned subtract: entries below wsize clamp to zero */
        result0 = _mm_subs_epu16(value0, wsize);
        result1 = _mm_subs_epu16(value1, wsize);
        _mm_store_si128((__m128i *)table, result0);
        _mm_store_si128((__m128i *)(table + 8), result1);

        table -= 16;
        entries -= 16;
    } while (entries > 0);

    ++on_chain;
    if (on_chain > 1) {
        return;
    } else {
        goto next_chain;
    }
}

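/* Called when deflate slides its window forward by w_size bytes: every match
 * position stored in the head and prev tables must be reduced by w_size, and
 * positions older than the window are clamped to zero (NIL) by the saturating
 * subtract in slide_hash_chain(). */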
Z_INTERNAL void slide_hash_sse2(deflate_state *s) {
    uint16_t wsize = (uint16_t)s->w_size;
    const __m128i xmm_wsize = _mm_set1_epi16((short)wsize);

    assert(((uintptr_t)s->head & 15) == 0);
    assert(((uintptr_t)s->prev & 15) == 0);

    slide_hash_chain(s->head, s->prev, HASH_SIZE, wsize, xmm_wsize);
}