// SPDX-License-Identifier: GPL-2.0-only
/*
 * LZO1X Compressor from LZO
 *
 * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
 *
 * The full LZO package can be found at:
 * http://www.oberhumer.com/opensource/lzo/
 *
 * Changed for Linux kernel use by:
 * Nitin Gupta <nitingupta910@gmail.com>
 * Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <linux/lzo.h>
#include "lzodefs.h"

#undef LZO_UNSAFE

#ifndef LZO_SAFE
#define LZO_UNSAFE 1
#define LZO_SAFE(name) name
#define HAVE_OP(x) 1
#endif

#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
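
/*
 * Single-block compressor core: scans the input with a four-byte hash
 * into wrkmem's dictionary, emitting literal runs and match
 * instructions at *out. For the lzo-rle bitstream it also detects long
 * runs of zero bytes. *tp carries the count of trailing literals not
 * yet emitted, and *state_offset remembers where the previous
 * instruction keeps the bits that record a following short literal run.
 */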
static noinline int
LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len,
			      unsigned char **out, unsigned char *op_end,
			      size_t *tp, void *wrkmem,
			      signed char *state_offset,
			      const unsigned char bitstream_version)
{
	const unsigned char *ip;
	unsigned char *op;
	const unsigned char * const in_end = in + in_len;
	const unsigned char * const ip_end = in + in_len - 20;
	const unsigned char *ii;
	lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
	size_t ti = *tp;

	op = *out;
	ip = in;
	ii = ip;
	ip += ti < 4 ? 4 - ti : 0;

	for (;;) {
		const unsigned char *m_pos = NULL;
		size_t t, m_len, m_off;
		u32 dv;
		u32 run_length = 0;
literal:
		ip += 1 + ((ip - ii) >> 5);
next:
		if (unlikely(ip >= ip_end))
			break;
		dv = get_unaligned_le32(ip);
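
		/*
		 * lzo-rle extension: if the next four bytes are all zero,
		 * measure the length of the zero run (capped at
		 * MAX_ZERO_RUN_LENGTH) so it can be emitted below as a single
		 * run instruction instead of ordinary literals and matches.
		 */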
		if (dv == 0 && bitstream_version) {
			const unsigned char *ir = ip + 4;
			const unsigned char *limit = ip_end
				< (ip + MAX_ZERO_RUN_LENGTH + 1)
				? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
			u64 dv64;

			for (; (ir + 32) <= limit; ir += 32) {
				dv64 = get_unaligned((u64 *)ir);
				dv64 |= get_unaligned((u64 *)ir + 1);
				dv64 |= get_unaligned((u64 *)ir + 2);
				dv64 |= get_unaligned((u64 *)ir + 3);
				if (dv64)
					break;
			}
			for (; (ir + 8) <= limit; ir += 8) {
				dv64 = get_unaligned((u64 *)ir);
				if (dv64) {
# if defined(__LITTLE_ENDIAN)
					ir += __builtin_ctzll(dv64) >> 3;
# elif defined(__BIG_ENDIAN)
					ir += __builtin_clzll(dv64) >> 3;
# else
# error "missing endian definition"
# endif
					break;
				}
			}
#else
			while ((ir < (const unsigned char *)
					ALIGN((uintptr_t)ir, 4)) &&
					(ir < limit) && (*ir == 0))
				ir++;
			if (IS_ALIGNED((uintptr_t)ir, 4)) {
				for (; (ir + 4) <= limit; ir += 4) {
					dv = *((u32 *)ir);
					if (dv) {
# if defined(__LITTLE_ENDIAN)
						ir += __builtin_ctz(dv) >> 3;
# elif defined(__BIG_ENDIAN)
						ir += __builtin_clz(dv) >> 3;
# else
# error "missing endian definition"
# endif
						break;
					}
				}
			}
#endif
			while (likely(ir < limit) && unlikely(*ir == 0))
				ir++;
			run_length = ir - ip;
			if (run_length > MAX_ZERO_RUN_LENGTH)
				run_length = MAX_ZERO_RUN_LENGTH;
		} else {
			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
			m_pos = in + dict[t];
			dict[t] = (lzo_dict_t) (ip - in);
			if (unlikely(dv != get_unaligned_le32(m_pos)))
				goto literal;
		}
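
		/*
		 * Emit the pending literal run of t = ip - ii bytes. Runs of
		 * 1..3 literals are recorded in the low bits of the previous
		 * instruction (op[*state_offset]) and copied directly; longer
		 * runs are preceded by an explicit length byte, with extra
		 * zero bytes for runs longer than 18.
		 */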
		ii -= ti;
		ti = 0;
		t = ip - ii;
		if (t != 0) {
			if (t <= 3) {
				op[*state_offset] |= t;
				NEED_OP(4);
				COPY4(op, ii);
				op += t;
			} else if (t <= 16) {
				NEED_OP(17);
				*op++ = (t - 3);
				COPY8(op, ii);
				COPY8(op + 8, ii + 8);
				op += t;
			} else {
				if (t <= 18) {
					NEED_OP(1);
					*op++ = (t - 3);
				} else {
					size_t tt = t - 18;
					NEED_OP(1);
					*op++ = 0;
					while (unlikely(tt > 255)) {
						tt -= 255;
						NEED_OP(1);
						*op++ = 0;
					}
					NEED_OP(1);
					*op++ = tt;
				}
				NEED_OP(t);
				do {
					COPY8(op, ii);
					COPY8(op + 8, ii + 8);
					op += 16;
					ii += 16;
					t -= 16;
				} while (t >= 16);
				if (t > 0) do {
					*op++ = *ii++;
				} while (--t > 0);
			}
		}
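
		/*
		 * If a zero run was found above, encode it as one four-byte
		 * run instruction and continue scanning right after the run.
		 * The state offset of -3 points back into this instruction so
		 * a following 1..3 byte literal run can be recorded in it.
		 */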
		if (unlikely(run_length)) {
			ip += run_length;
			run_length -= MIN_ZERO_RUN_LENGTH;
			NEED_OP(4);
			put_unaligned_le32((run_length << 21) | 0xfffc18
					   | (run_length & 0x7), op);
			op += 4;
			run_length = 0;
			*state_offset = -3;
			goto finished_writing_instruction;
		}
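
		/*
		 * The dictionary hit already guarantees a four-byte match;
		 * extend it as far as possible, comparing 8 or 4 bytes at a
		 * time where unaligned accesses are cheap and using ctz/clz
		 * on the XOR of the first mismatching word to get the exact
		 * match length.
		 */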
		m_len = 4;
		{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
		u64 v;
		v = get_unaligned((const u64 *) (ip + m_len)) ^
		    get_unaligned((const u64 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 8;
				v = get_unaligned((const u64 *) (ip + m_len)) ^
				    get_unaligned((const u64 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
# if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctzll(v) / 8;
# elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clzll(v) / 8;
# else
# error "missing endian definition"
# endif
#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
		u32 v;
		v = get_unaligned((const u32 *) (ip + m_len)) ^
		    get_unaligned((const u32 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (v != 0)
					break;
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
# if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctz(v) / 8;
# elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clz(v) / 8;
# else
# error "missing endian definition"
# endif
#else
		if (unlikely(ip[m_len] == m_pos[m_len])) {
			do {
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (ip[m_len] == m_pos[m_len]);
		}
#endif
		}
m_len_done:
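
		/*
		 * Encode the match: short, near matches use the compact M2
		 * form, medium distances use M3, and distances beyond
		 * M3_MAX_OFFSET use M4, whose opcode byte also carries the
		 * highest offset bit. Under lzo-rle, match lengths 261..264
		 * at certain offsets are trimmed to 260 so the output cannot
		 * collide with the run instruction encoding.
		 */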
		m_off = ip - m_pos;
		ip += m_len;
		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
			m_off -= 1;
			NEED_OP(2);
			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
			*op++ = (m_off >> 3);
		} else if (m_off <= M3_MAX_OFFSET) {
			m_off -= 1;
			NEED_OP(1);
			if (m_len <= M3_MAX_LEN)
				*op++ = (M3_MARKER | (m_len - 2));
			else {
				m_len -= M3_MAX_LEN;
				*op++ = M3_MARKER | 0;
				while (unlikely(m_len > 255)) {
					m_len -= 255;
					NEED_OP(1);
					*op++ = 0;
				}
				NEED_OP(1);
				*op++ = (m_len);
			}
			NEED_OP(2);
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		} else {
			m_off -= 0x4000;
			NEED_OP(1);
			if (m_len <= M4_MAX_LEN)
				*op++ = (M4_MARKER | ((m_off >> 11) & 8)
						| (m_len - 2));
			else {
				if (unlikely(((m_off & 0x403f) == 0x403f)
						&& (m_len >= 261)
						&& (m_len <= 264))
						&& likely(bitstream_version)) {
					// Under lzo-rle, block copies
					// for 261 <= length <= 264 and
					// (distance & 0x80f3) == 0x80f3
					// can result in ambiguous
					// output. Adjust length
					// to 260 to prevent ambiguity.
					ip -= m_len - 260;
					m_len = 260;
				}
				m_len -= M4_MAX_LEN;
				*op++ = (M4_MARKER | ((m_off >> 11) & 8));
				while (unlikely(m_len > 255)) {
					NEED_OP(1);
					m_len -= 255;
					*op++ = 0;
				}
				NEED_OP(1);
				*op++ = (m_len);
			}
			NEED_OP(2);
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		}
		*state_offset = -2;
finished_writing_instruction:
		ii = ip;
		goto next;
	}
	*out = op;
	*tp = in_end - (ii - ti);
	return LZO_E_OK;

output_overrun:
	return LZO_E_OUTPUT_OVERRUN;
}
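
/*
 * Generic driver shared by the plain LZO1X-1 and lzo-rle entry points:
 * it optionally writes the two-byte bitstream-version header, feeds the
 * input to the block compressor in chunks bounded by the maximum M4
 * offset, then appends any trailing literals and the end-of-stream
 * marker.
 */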
static int LZO_SAFE(lzogeneric1x_1_compress)(
		const unsigned char *in, size_t in_len,
		unsigned char *out, size_t *out_len,
		void *wrkmem, const unsigned char bitstream_version)
{
	unsigned char * const op_end = out + *out_len;
	const unsigned char *ip = in;
	unsigned char *op = out;
	unsigned char *data_start;
	size_t l = in_len;
	size_t t = 0;
	signed char state_offset = -2;
	unsigned int m4_max_offset;

	// LZO v0 will never write 17 as first byte (except for zero-length
	// input), so this is used to version the bitstream
	if (bitstream_version > 0) {
		*op++ = 17;
		*op++ = bitstream_version;
		m4_max_offset = M4_MAX_OFFSET_V1;
	} else {
		m4_max_offset = M4_MAX_OFFSET_V0;
	}

	data_start = op;

	while (l > 20) {
		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
		uintptr_t ll_end = (uintptr_t) ip + ll;
		int err;

		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
			break;
		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
		err = LZO_SAFE(lzo1x_1_do_compress)(
			ip, ll, &op, op_end, &t, wrkmem,
			&state_offset, bitstream_version);
		if (err != LZO_E_OK)
			return err;
		ip += ll;
		l -= ll;
	}
	t += l;
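
	/*
	 * Emit whatever input remains unconsumed (the short tail plus any
	 * literals the block compressor left pending) as a final literal
	 * run, using the same length encoding as the block compressor.
	 */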
	if (t > 0) {
		const unsigned char *ii = in + in_len - t;

		if (op == data_start && t <= 238) {
			NEED_OP(1);
			*op++ = (17 + t);
		} else if (t <= 3) {
			op[state_offset] |= t;
		} else if (t <= 18) {
			NEED_OP(1);
			*op++ = (t - 3);
		} else {
			size_t tt = t - 18;
			NEED_OP(1);
			*op++ = 0;
			while (tt > 255) {
				tt -= 255;
				NEED_OP(1);
				*op++ = 0;
			}
			NEED_OP(1);
			*op++ = tt;
		}
		NEED_OP(t);
		if (t >= 16) do {
			COPY8(op, ii);
			COPY8(op + 8, ii + 8);
			op += 16;
			ii += 16;
			t -= 16;
		} while (t >= 16);
		if (t > 0) do {
			*op++ = *ii++;
		} while (--t > 0);
	}
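
	/* Append the LZO end-of-stream marker. */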
	NEED_OP(3);
	*op++ = M4_MARKER | 1;
	*op++ = 0;
	*op++ = 0;

	*out_len = op - out;
	return LZO_E_OK;

output_overrun:
	return LZO_E_OUTPUT_OVERRUN;
}
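
/*
 * Classic LZO1X-1 entry point: bitstream version 0, no RLE extension.
 *
 * Illustrative usage sketch only: callers typically size the output
 * buffer with lzo1x_worst_compress() and pass LZO1X_1_MEM_COMPRESS
 * bytes of scratch memory, e.g.
 *
 *	size_t out_len = lzo1x_worst_compress(in_len);
 *	int ret = lzo1x_1_compress(in, in_len, out, &out_len, wrkmem);
 *	if (ret != LZO_E_OK)
 *		goto err;
 */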
int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len,
			       unsigned char *out, size_t *out_len,
			       void *wrkmem)
{
	return LZO_SAFE(lzogeneric1x_1_compress)(
		in, in_len, out, out_len, wrkmem, 0);
}
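
/*
 * lzo-rle entry point: writes a versioned bitstream (first bytes 17,
 * LZO_VERSION) with run-length encoding of zero runs enabled.
 */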
int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len,
				  unsigned char *out, size_t *out_len,
				  void *wrkmem)
{
	return LZO_SAFE(lzogeneric1x_1_compress)(
		in, in_len, out, out_len, wrkmem, LZO_VERSION);
}

EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress));
EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress));

#ifndef LZO_UNSAFE
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
#endif