/*
 * lz4defs.h -- architecture specific defines
 *
 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Detect 64-bit mode
 */
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
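/*
 * LZ4_ARCH64 selects the 64-bit code paths further down: STEPSIZE 8,
 * single 8-byte copy packets and the 64-bit clz/ctz builtins, instead
 * of the 4-byte variants used on 32-bit builds.
 */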

/*
 * Architecture-specific macros
 */
#define BYTE	u8
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
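/*
 * A16/A32/A64 load or store a potentially unaligned 16/32/64-bit word,
 * and PUT4/PUT8 copy 4 or 8 bytes between two such locations; e.g.
 * PUT8(s, d) copies eight bytes from s to d regardless of alignment.
 * On targets with efficient unaligned access they are plain dereferences
 * through the wrapper structs above; otherwise they fall back to the
 * get_unaligned()/put_unaligned() helpers.  Note that the fast-path
 * LZ4_READ/WRITE_LITTLEENDIAN_16 macros access the 16-bit match offset
 * in host byte order, so that branch effectively assumes a little-endian
 * host.
 */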
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)		\
	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6	\
	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)

#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v)

#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))

#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
	(d = s - A16(p))

#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
	do {	\
		A16(p) = v; \
		p += 2; \
	} while (0)
#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */

#define A64(x) get_unaligned((u64 *)&(((U64_S *)(x))->v))
#define A32(x) get_unaligned((u32 *)&(((U32_S *)(x))->v))
#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))

#define PUT4(s, d) \
	put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
#define PUT8(s, d) \
	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)

#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
	(d = s - get_unaligned_le16(p))

#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
	do {						\
		put_unaligned_le16(v, (u16 *)(p));	\
		p += 2;					\
	} while (0)
#endif

#define COPYLENGTH 8
#define ML_BITS  4
#define ML_MASK  ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
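/*
 * These masks describe the LZ4 token byte: the upper RUN_BITS bits hold
 * the literal run length and the lower ML_BITS bits hold the match
 * length (minus MINMATCH); a field equal to RUN_MASK or ML_MASK means
 * the length continues in the following byte(s).
 */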
#define MEMORY_USAGE	14
#define MINMATCH	4
#define SKIPSTRENGTH	6
#define LASTLITERALS	5
#define MFLIMIT		(COPYLENGTH + MINMATCH)
#define MINLENGTH	(MFLIMIT + 1)
#define MAXD_LOG	16
#define MAXD		(1 << MAXD_LOG)
#define MAXD_MASK	(u32)(MAXD - 1)
#define MAX_DISTANCE	(MAXD - 1)
#define HASH_LOG	(MAXD_LOG - 1)
#define HASHTABLESIZE	(1 << HASH_LOG)
#define MAX_NB_ATTEMPTS	256
#define OPTIMAL_ML	(int)((ML_MASK-1)+MINMATCH)
#define LZ4_64KLIMIT	((1<<16) + (MFLIMIT - 1))
#define HASHLOG64K	((MEMORY_USAGE - 2) + 1)
#define HASH64KTABLESIZE	(1U << HASHLOG64K)
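/*
 * The hash macros below multiply the MINMATCH (4) bytes at p by
 * 2654435761U (0x9E3779B1, a prime close to 2^32 divided by the golden
 * ratio, the classic multiplicative-hashing constant) and keep only the
 * top bits of the 32-bit product: MEMORY_USAGE - 2 = 12 bits for
 * LZ4_HASH_VALUE (a table of 2^12 four-byte slots is 2^MEMORY_USAGE =
 * 16 KiB), HASHLOG64K = 13 bits for the 64 KiB variant and HASH_LOG =
 * 15 bits for HASH_VALUE.
 */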
#define LZ4_HASH_VALUE(p)	(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - (MEMORY_USAGE-2)))
#define LZ4_HASH64K_VALUE(p)	(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - HASHLOG64K))
#define HASH_VALUE(p)		(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - HASH_LOG))

#if LZ4_ARCH64	/* 64-bit */
#define STEPSIZE 8

#define LZ4_COPYSTEP(s, d)	\
	do {			\
		PUT8(s, d);	\
		d += 8;		\
		s += 8;		\
	} while (0)

#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)

#define LZ4_SECURECOPY(s, d, e)			\
	do {					\
		if (d < e) {			\
			LZ4_WILDCOPY(s, d, e);	\
		}				\
	} while (0)
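/*
 * The 64-bit LZ4_SECURECOPY() guards the copy with (d < e): the do/while
 * loop in LZ4_WILDCOPY() always writes at least one packet, so the check
 * prevents an unwanted copy when d has already reached e.
 */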
#define HTYPE u32

#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
#endif

#else	/* 32-bit */
#define STEPSIZE 4

#define LZ4_COPYSTEP(s, d)	\
	do {			\
		PUT4(s, d);	\
		d += 4;		\
		s += 4;		\
	} while (0)

#define LZ4_COPYPACKET(s, d)		\
	do {				\
		LZ4_COPYSTEP(s, d);	\
		LZ4_COPYSTEP(s, d);	\
	} while (0)

#define LZ4_SECURECOPY	LZ4_WILDCOPY
#define HTYPE const u8*

#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
#endif

#endif
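/*
 * LZ4_NBCOMMONBYTES(val) converts a non-zero XOR of two STEPSIZE-byte
 * words into the number of lower-addressed bytes they have in common:
 * the count of trailing zero bits divided by eight on little-endian
 * hosts, or of leading zero bits on big-endian hosts.  A sketch of the
 * match-extension idea on a 64-bit build (illustrative only, not code
 * from this header):
 *
 *	u64 diff = A64(ref) ^ A64(ip);
 *	if (diff)
 *		matched += LZ4_NBCOMMONBYTES(diff);
 */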
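/*
 * LZ4_WILDCOPY() copies whole COPYLENGTH-byte packets until d reaches e,
 * so it may write up to COPYLENGTH - 1 bytes beyond e; callers are
 * expected to leave that much slack in the destination, which is what
 * the MFLIMIT/LASTLITERALS margins above help guarantee.  LZ4_BLINDCOPY()
 * copies l bytes the same way and then sets d to exactly d + l.
 */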
#define LZ4_WILDCOPY(s, d, e)		\
	do {				\
		LZ4_COPYPACKET(s, d);	\
	} while (d < e)

#define LZ4_BLINDCOPY(s, d, l)	\
	do {	\
		u8 *e = (d) + l;	\
		LZ4_WILDCOPY(s, d, e);	\
		d = e;	\
	} while (0)
160