/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *  Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions.
 */

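/*
 * Illustrative sketch (not part of this header): a caller that needs a
 * little-endian field at an arbitrary byte offset in a raw buffer can use
 * the fixed-size helpers below directly.  The buffer and offsets here are
 * made up for the example:
 *
 *	u8 buf[64];
 *	u32 len;
 *
 *	len = get_unaligned_le32(buf + 1);	read 4 bytes, any alignment
 *	put_unaligned_le16(0x1234, buf + 5);	store 2 bytes, any alignment
 */
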
/*
 * 16-bit accesses are done byte by byte; the special non-aligned
 * load/store instructions only cover 32-bit and 64-bit quantities.
 */
static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}

/*
 * 32-bit and 64-bit accesses use the hardware non-aligned load/store
 * instructions (LDNW/STNW and LDNDW/STNDW).  The "nop 4" after each
 * load fills the four delay slots before the result is available.
 */
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop     4\n"
	     : "+a"(val));
	return val;
}

static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop     4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

static inline void put_unaligned64(u64 val, void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

/*
 * The hardware accessors above return data in the CPU's native byte
 * order, so only the non-native-endian variants need a byte swap.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif

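/*
 * Worked example (illustrative): with buf[] = { 0x12, 0x34, 0x56, 0x78 },
 *
 *	get_unaligned_le32(buf) == 0x78563412
 *	get_unaligned_be32(buf) == 0x12345678
 *
 * regardless of whether CONFIG_CPU_BIG_ENDIAN is set; only which of the
 * two expands to a __swab32() differs.
 */
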
/*
 * Cause a link-time error if we try an unaligned access other than
 * 1, 2, 4 or 8 bytes long
 */
extern int __bad_unaligned_access_size(void);

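/*
 * Note: __bad_unaligned_access_size() is intentionally never defined.
 * For the supported sizes the compiler constant-folds the size checks
 * below and drops the call entirely; for any other size the call
 * survives and the build fails at link time with an undefined reference.
 */
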
#define __get_unaligned_le(ptr) (typeof(*(ptr)))({			\
	sizeof(*(ptr)) == 1 ? *(ptr) :					\
	  (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) :		\
	     (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) :		\
		(sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) :	\
		   __bad_unaligned_access_size())));			\
	})

#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({	\
	sizeof(*(ptr)) == 1 ? *(ptr) :					\
	  (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) :		\
	     (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) :		\
		(sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) :	\
		   __bad_unaligned_access_size())));			\
	})

#define __put_unaligned_le(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_le16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_le32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_le64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#define __put_unaligned_be(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_be16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_be32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_be64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })
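
/*
 * Illustrative sketch (not part of this header): the size-generic
 * get_unaligned()/put_unaligned() macros take a pointer to the field
 * itself and pick the right helper from its size.  The packed header
 * struct here is made up for the example:
 *
 *	struct hdr {
 *		u8  flags;
 *		u32 seq;
 *	} __packed;
 *
 *	struct hdr *h = (struct hdr *)buf;
 *	u32 seq = get_unaligned(&h->seq);
 *	put_unaligned(seq + 1, &h->seq);
 */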

#endif /* _ASM_C6X_UNALIGNED_H */