/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
void HIST_add(unsigned* count, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;

    while (ip<end) {
        count[*ip++]++;
    }
}
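
/* Illustrative usage sketch (not part of the library; `chunk1`/`chunk2` and their
 * sizes are hypothetical buffers) : HIST_add() accumulates into an existing
 * histogram without clearing it, so the caller zeroes the table once and may
 * then feed several chunks in sequence :
 *
 *     unsigned count[256] = {0};
 *     HIST_add(count, chunk1, chunk1Size);
 *     HIST_add(count, chunk2, chunk2Size);
 */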

unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
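
/* Illustrative usage sketch (not part of the library; `src`/`srcSize` stand for
 * any input buffer) : HIST_count_simple() expects every byte of `src` to be
 * <= the initial *maxSymbolValuePtr, and lowers it to the largest symbol
 * actually present :
 *
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;
 *     unsigned const largestCount = HIST_count_simple(count, &maxSymbolValue, src, srcSize);
 *     // count[s] now holds the number of occurrences of byte value s
 */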

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * This design makes better use of out-of-order (OoO) CPUs,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
    /* by stripes of 16 bytes */
    {   U32 cached = MEM_read32(ip); ip += 4;   /* pre-load 4 bytes : each iteration counts the previous load while the next one is in flight */
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
        }
        ip-=4;   /* the last 4 bytes held in `cached` were read but not counted : rewind so the tail loop below handles them */
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}
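
/* Sizing note (for illustration, assuming a 4-byte `unsigned`) :
 * the 4 intermediate tables above use 4*256 U32 counters, i.e. 4 KiB of scratch,
 * which is what the HIST_WKSP_SIZE_U32 requirement on the public *_wksp
 * entry points is meant to provide (see hist.h). */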

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-byte aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                          const void* source, size_t sourceSize,
                          void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
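
/* Illustrative usage sketch (not part of the library; `src`/`srcSize` stand for
 * any input buffer) : callers typically provide a small scratch area, e.g. on the stack :
 *
 *     U32 wksp[HIST_WKSP_SIZE_U32];   // 4-byte aligned scratch area
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;
 *     size_t const largest = HIST_countFast_wksp(count, &maxSymbolValue,
 *                                                src, srcSize,
 *                                                wksp, sizeof(wksp));
 *     if (HIST_isError(largest)) { ... }   // handle error
 */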

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
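
/* Illustrative usage sketch (not part of the library; `src`/`srcSize` stand for
 * any input buffer) : unlike the Fast variants, which trust the input, this
 * checked entry point returns maxSymbolValue_tooSmall when `src` contains a
 * symbol above the initial *maxSymbolValuePtr :
 *
 *     U32 wksp[HIST_WKSP_SIZE_U32];
 *     unsigned count[64];
 *     unsigned maxSymbolValue = 63;        // only symbols 0..63 are expected
 *     size_t const r = HIST_count_wksp(count, &maxSymbolValue, src, srcSize, wksp, sizeof(wksp));
 *     if (HIST_isError(r)) { ... }         // e.g. maxSymbolValue_tooSmall
 */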

#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
}

size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
#endif