/* adler32.c -- compute the Adler-32 checksum of a data stream
 * Copyright (C) 1995-2011, 2016 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "zbuild.h"
#include "zutil.h"
#include "functable.h"
#include "adler32_p.h"

/* ========================================================================= */
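/* Generic C implementation. The exported adler32()/adler32_z() wrappers below
   dispatch through functable, which may select an architecture-specific
   implementation in place of this one. */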
Z_INTERNAL uint32_t adler32_c(uint32_t adler, const unsigned char *buf, size_t len) {
    uint32_t sum2;
    unsigned n;

    /* split Adler-32 into component sums */
    sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;

    /* in case user likes doing a byte at a time, keep it fast */
    if (UNLIKELY(len == 1))
        return adler32_len_1(adler, buf, sum2);

    /* initial Adler-32 value (deferred check for len == 1 speed) */
    if (UNLIKELY(buf == NULL))
        return 1L;

    /* in case short lengths are provided, keep it somewhat fast */
    if (UNLIKELY(len < 16))
        return adler32_len_16(adler, buf, len, sum2);

    /* do length NMAX blocks -- requires just one modulo operation */
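    /* NMAX is the largest n for which 255*n*(n+1)/2 + (n+1)*(BASE-1) still
       fits in 32 bits, so both component sums can be accumulated across a
       whole block before a single modulo is needed. */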
    while (len >= NMAX) {
        len -= NMAX;
#ifdef UNROLL_MORE
        n = NMAX / 16;          /* NMAX is divisible by 16 */
#else
        n = NMAX / 8;           /* NMAX is divisible by 8 */
#endif
        do {
#ifdef UNROLL_MORE
            DO16(adler, sum2, buf);   /* 16 sums unrolled */
            buf += 16;
#else
            DO8(adler, sum2, buf, 0); /* 8 sums unrolled */
            buf += 8;
#endif
        } while (--n);
        adler %= BASE;
        sum2 %= BASE;
    }

    /* do remaining bytes (less than NMAX, still just one modulo) */
    if (len) {                  /* avoid modulos if none remaining */
#ifdef UNROLL_MORE
        while (len >= 16) {
            len -= 16;
            DO16(adler, sum2, buf);
            buf += 16;
#else
        while (len >= 8) {
            len -= 8;
            DO8(adler, sum2, buf, 0);
            buf += 8;
#endif
        }
        while (len) {
            --len;
            adler += *buf++;
            sum2 += adler;
        }
        adler %= BASE;
        sum2 %= BASE;
    }

    /* return recombined sums */
    return adler | (sum2 << 16);
}

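/* ========================================================================= */
/* Exported entry points. With ZLIB_COMPAT defined, the zlib-style prototypes
   (unsigned long checksum) are exported; otherwise the fixed-width uint32_t
   prototypes are used. adler32_z() takes a size_t length, adler32() the
   narrower length type of its API. Both forward to the implementation
   selected in functable. */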
#ifdef ZLIB_COMPAT
unsigned long Z_EXPORT PREFIX(adler32_z)(unsigned long adler, const unsigned char *buf, size_t len) {
    return (unsigned long)functable.adler32((uint32_t)adler, buf, len);
}
#else
uint32_t Z_EXPORT PREFIX(adler32_z)(uint32_t adler, const unsigned char *buf, size_t len) {
    return functable.adler32(adler, buf, len);
}
#endif

/* ========================================================================= */
#ifdef ZLIB_COMPAT
unsigned long Z_EXPORT PREFIX(adler32)(unsigned long adler, const unsigned char *buf, unsigned int len) {
    return (unsigned long)functable.adler32((uint32_t)adler, buf, len);
}
#else
uint32_t Z_EXPORT PREFIX(adler32)(uint32_t adler, const unsigned char *buf, uint32_t len) {
    return functable.adler32(adler, buf, len);
}
#endif

/* ========================================================================= */
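/* Combine two Adler-32 checksums: given adler1 of a first sequence and adler2
   of a second sequence of length len2, return the Adler-32 of the two
   sequences concatenated. */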
static uint32_t adler32_combine_(uint32_t adler1, uint32_t adler2, z_off64_t len2) {
    uint32_t sum1;
    uint32_t sum2;
    unsigned rem;

    /* for negative len, return invalid adler32 as a clue for debugging */
    if (len2 < 0)
        return 0xffffffff;

    /* the derivation of this formula is left as an exercise for the reader */
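    /* Sketch of that derivation: starting from adler = 1, processing n bytes
       b[1..n] gives s1 = 1 + sum(b[i]) and s2 = n + sum((n-i+1)*b[i]).
       Writing s1a/s2a for the component sums of adler1 and s1b/s2b for those
       of adler2, appending the second buffer of length len2 yields
           s1' = s1a + s1b - 1
           s2' = s2a + s2b + len2*(s1a - 1)
       all taken mod BASE. The code below computes exactly this, with BASE
       (and BASE - rem) added so the intermediate sums never go negative. */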
    len2 %= BASE;                /* assumes len2 >= 0 */
    rem = (unsigned)len2;
    sum1 = adler1 & 0xffff;
    sum2 = rem * sum1;
    sum2 %= BASE;
    sum1 += (adler2 & 0xffff) + BASE - 1;
    sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem;
    if (sum1 >= BASE) sum1 -= BASE;
    if (sum1 >= BASE) sum1 -= BASE;
    if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1);
    if (sum2 >= BASE) sum2 -= BASE;
    return sum1 | (sum2 << 16);
}

/* ========================================================================= */
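/* In ZLIB_COMPAT mode both the z_off_t and 64-bit z_off64_t combine entry
   points are exported; otherwise only the 64-bit (PREFIX4) variant is. */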
#ifdef ZLIB_COMPAT
unsigned long Z_EXPORT PREFIX(adler32_combine)(unsigned long adler1, unsigned long adler2, z_off_t len2) {
    return (unsigned long)adler32_combine_((uint32_t)adler1, (uint32_t)adler2, len2);
}

unsigned long Z_EXPORT PREFIX4(adler32_combine)(unsigned long adler1, unsigned long adler2, z_off64_t len2) {
    return (unsigned long)adler32_combine_((uint32_t)adler1, (uint32_t)adler2, len2);
}
#else
uint32_t Z_EXPORT PREFIX4(adler32_combine)(uint32_t adler1, uint32_t adler2, z_off64_t len2) {
    return adler32_combine_(adler1, adler2, len2);
}
#endif