/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_TOMI_INTREADWRITE_H
#define AVUTIL_TOMI_INTREADWRITE_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

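/* Read a 16-bit big-endian value from p: the first byte is rotated into
 * the high position, then the second byte is added, using TOMI
 * accumulator instructions with post-increment loads. */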
#define AV_RB16 AV_RB16
static av_always_inline uint16_t AV_RB16(const void *p)
{
    uint16_t v;
    __asm__ ("loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "storeacc,  %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "add,       %0    \n\t"
             : "=r"(v), "+a"(p));
    return v;
}

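/* Write the 16-bit value v to p in big-endian order: the high byte
 * (obtained with an 8-bit right shift) is stored first, then the low
 * byte. */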
#define AV_WB16 AV_WB16
static av_always_inline void AV_WB16(void *p, uint16_t v)
{
    __asm__ volatile ("loadacc,   %1    \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (%0+) \n\t"
                      "loadacc,   %1    \n\t"
                      "storeacc,  (%0+) \n\t"
                      : "+&a"(p) : "r"(v));
}

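/* Read a 16-bit little-endian value from p: the first byte is the low
 * byte, the second byte is rotated into the high position and added. */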
#define AV_RL16 AV_RL16
static av_always_inline uint16_t AV_RL16(const void *p)
{
    uint16_t v;
    __asm__ ("loadacc,   (%1+) \n\t"
             "storeacc,  %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             : "=r"(v), "+a"(p));
    return v;
}

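/* Write the 16-bit value v to p in little-endian order: the low byte is
 * stored first, then the high byte. */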
#define AV_WL16 AV_WL16
static av_always_inline void AV_WL16(void *p, uint16_t v)
{
    __asm__ volatile ("loadacc,   %1    \n\t"
                      "storeacc,  (%0+) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (%0+) \n\t"
                      : "+&a"(p) : "r"(v));
}

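/* Read a 32-bit big-endian value from p, accumulating four successive
 * bytes rotated left by 24, 16, 8 and 0 bits respectively. */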
#define AV_RB32 AV_RB32
static av_always_inline uint32_t AV_RB32(const void *p)
{
    uint32_t v;
    __asm__ ("loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "storeacc,  %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "add,       %0    \n\t"
             : "=r"(v), "+a"(p));
    return v;
}

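/* Write the 32-bit value v to p in big-endian order: the pointer is
 * advanced past the end of the value, then the bytes are stored from
 * low to high with pre-decrement addressing, which places the high
 * byte at the lowest address. */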
#define AV_WB32 AV_WB32
static av_always_inline void AV_WB32(void *p, uint32_t v)
{
    __asm__ volatile ("loadacc,   #4    \n\t"
                      "add,       %0    \n\t"
                      "loadacc,   %1    \n\t"
                      "storeacc,  (-%0) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (-%0) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (-%0) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (-%0) \n\t"
                      : "+&a"(p) : "r"(v));
}

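/* Read a 32-bit little-endian value from p: successive bytes are rotated
 * left by 0, 8, 16 and 24 bits before being added to the result. */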
#define AV_RL32 AV_RL32
static av_always_inline uint32_t AV_RL32(const void *p)
{
    uint32_t v;
    __asm__ ("loadacc,   (%1+) \n\t"
             "storeacc,  %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             "loadacc,   (%1+) \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "rol8             \n\t"
             "add,       %0    \n\t"
             : "=r"(v), "+a"(p));
    return v;
}

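/* Write the 32-bit value v to p in little-endian order: bytes are stored
 * from low to high with post-increment, shifting v right 8 bits between
 * stores. */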
#define AV_WL32 AV_WL32
static av_always_inline void AV_WL32(void *p, uint32_t v)
{
    __asm__ volatile ("loadacc,   %1    \n\t"
                      "storeacc,  (%0+) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (%0+) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (%0+) \n\t"
                      "lsr8             \n\t"
                      "storeacc,  (%0+) \n\t"
                      : "+&a"(p) : "r"(v));
}

#endif /* AVUTIL_TOMI_INTREADWRITE_H */