#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.82 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on the compiler's ability to inline small functions. The compiler also
# disables interrupts for some reason, making interrupt response time
# dependent on input length. This module, on the other hand, is free
# from such a limitation.

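# Below is a minimal reference sketch, in plain Perl on top of the core
# Math::BigInt module, of the arithmetic the assembly implements: each
# 16-byte little-endian block m, extended by a 17th 0x01 byte, is folded
# into the accumulator as h = (h+m)*r mod 2^130-5, and the second half
# of the key is added to the final h modulo 2^128. The helper below is
# hypothetical, kept for illustration only, never called by this
# generator, and handles whole blocks only.

use Math::BigInt;

sub poly1305_ref {
	my ($key,$msg) = @_;		# 32-byte key, len($msg) % 16 == 0
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	my $r = Math::BigInt->from_hex(unpack("H*",scalar reverse substr($key,0,16)));
	$r->band(Math::BigInt->from_hex("0ffffffc0ffffffc0ffffffc0fffffff"));
	my $s = Math::BigInt->from_hex(unpack("H*",scalar reverse substr($key,16,16)));
	my $h = Math::BigInt->new(0);
	for (my $i=0; $i<length($msg); $i+=16) {
		my $m = Math::BigInt->from_hex("01".unpack("H*",scalar reverse substr($msg,$i,16)));
		$h->badd($m)->bmul($r)->bmod($p);	# h = (h+m)*r mod p
	}
	$h->badd($s);				# add nonce, keep low 128 bits
	my $hex = substr($h->as_hex(),2);	# strip "0x" prefix
	$hex = "0"x(32-length($hex)).$hex if length($hex) < 32;
	return scalar reverse pack("H*",substr($hex,-32));
}
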
$output=pop and open STDOUT,">$output";

($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
($D0,$D1,$D2,$D3)=         ("A9","B9","A11","B11");
($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
($THREE,$R0b,$S2a)=("B7","B5","A5");

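# Argument registers follow the C6000 calling convention: the first four
# word-sized arguments arrive in A4, B4, A6 and B6, and the return value,
# when there is one, goes back in A4.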
$code.=<<___;
	.text

	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	.if	__TI_EABI__
	.asg	poly1305_init,_poly1305_init
	.asg	poly1305_blocks,_poly1305_blocks
	.asg	poly1305_emit,_poly1305_emit
	.endif

	.asg	B3,RA
	.asg	A15,FP
	.asg	B15,SP

	.if	.LITTLE_ENDIAN
	.asg	MV,SWAP2
	.asg	MV.L,SWAP4
	.endif
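; On little-endian targets no byte swapping is needed, so SWAP2 and
; SWAP4 are aliased above to plain register moves.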

	.global	_poly1305_init
_poly1305_init:
	.asmfunc
	LDNDW	*${INPB}[0],B17:B16	; load key material
	LDNDW	*${INPB}[1],A17:A16

	ZERO	B9:B8
||	MVK	-1,B0
	STDW	B9:B8,*${CTXA}[0]	; initialize h1:h0
||	SHRU	B0,4,B0			; 0x0fffffff
||	MVK	-4,B1
	STDW	B9:B8,*${CTXA}[1]	; initialize h3:h2
||	AND	B0,B1,B1		; 0x0ffffffc
	STW	B8,*${CTXA}[4]		; initialize h4

	.if	.BIG_ENDIAN
	SWAP2	B16,B17
||	SWAP2	B17,B16
	SWAP2	A16,A17
||	SWAP2	A17,A16
	SWAP4	B16,B16
||	SWAP4	A16,A16
	SWAP4	B17,B17
||	SWAP4	A17,A17
	.endif

	AND	B16,B0,B20		; r0 = key[0] & 0x0fffffff
||	AND	B17,B1,B22		; r1 = key[1] & 0x0ffffffc
||	EXTU	B17,4,6,B16		; r1>>2
	AND	A16,B1,B21		; r2 = key[2] & 0x0ffffffc
||	AND	A17,B1,A23		; r3 = key[3] & 0x0ffffffc
||	BNOP	RA
	SHRU	B21,2,B18
||	ADD	B22,B16,B16		; s1 = r1 + r1>>2

	STDW	B21:B20,*${CTXA}[3]	; save r2:r0
||	ADD	B21,B18,B18		; s2 = r2 + r2>>2
||	SHRU	A23,2,B17
||	MV	A23,B23
	STDW	B23:B22,*${CTXA}[4]	; save r3:r1
||	ADD	B23,B17,B19		; s3 = r3 + r3>>2 (copy for s3:s2)
||	ADD	B23,B17,B17		; s3 = r3 + r3>>2 (copy for s3:s1)
	STDW	B17:B16,*${CTXA}[5]	; save s3:s1
	STDW	B19:B18,*${CTXA}[6]	; save s3:s2
||	ZERO	A4			; return 0
	.endasmfunc
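
; The s_i = r_i + (r_i>>2) values stored above equal 5*r_i/4 exactly,
; because clamping zeroes the low two bits of r1, r2 and r3. They serve
; the reduction in poly1305_blocks: 2^130 = 5 mod 2^130-5 implies
; 2^128 = 5/4, so a product term h_j*r_i carrying weight 2^128 re-enters
; the low end as h_j*s_i.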

	.global	_poly1305_blocks
	.align	32
_poly1305_blocks:
	.asmfunc	stack_usage(40)
	SHRU	$LEN,4,A2		; A2 is loop counter, number of blocks
  [!A2]	BNOP	RA			; no data
|| [A2]	STW	FP,*SP--(40)		; save frame pointer and alloca(40)
|| [A2]	MV	SP,FP
   [A2]	STDW	B13:B12,*SP[4]		; ABI says so
|| [A2]	MV	$CTXA,$S3b		; borrow $S3b
   [A2]	STDW	B11:B10,*SP[3]
|| [A2]	STDW	A13:A12,*FP[-3]
   [A2]	STDW	A11:A10,*FP[-4]
|| [A2]	LDDW	*${S3b}[0],B25:B24	; load h1:h0
   [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
   [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	LDDW	*${CTXA}[1],B29:B28	; load h3:h2, B28 is h2
	LDNW	*${INPB}[-2],$D2	; load inp[2]
	LDNW	*${INPB}[-1],$D3	; load inp[3]

	LDDW	*${CTXA}[3],$R2:$R0	; load r2:r0
||	LDDW	*${S3b}[4],$R3:$R1	; load r3:r1
||	SWAP2	$D0,$D0

	LDDW	*${CTXA}[5],$S3:$S1	; load s3:s1
||	LDDW	*${S3b}[6],$S3b:$S2	; load s3:s2
||	SWAP4	$D0,$D0
||	SWAP2	$D1,$D1

	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
||	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
||	SWAP4	$D1,$D1
	ADDU	$D1,B25,$D1:$H1		; h1+=inp[1]
||	MVK	3,$THREE
||	SWAP2	$D2,$D2
	LDW	*${CTXA}[4],$H4		; load h4
||	SWAP4	$D2,$D2
||	MV	B29,B30			; B30 is h3
	MV	$R0,$R0b

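; One iteration computes h = (h + inp)*r mod 2^130-5 in radix 2^32,
; school-book style, with the A and B register files accumulating the
; even and odd 64-bit column sums respectively:
;	d0 = h0*r0 + h1*s3 + h2*s2 + h3*s1
;	d1 = h0*r1 + h1*r0 + h2*s3 + h3*s2 + h4*s1
;	d2 = h0*r2 + h1*r1 + h2*r0 + h3*s3 + h4*s2
;	d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*s3
;	h4 = h4*r0 (plus the carries out of d3)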
loop?:
	MPY32U	$H0,$R0,A17:A16
||	MPY32U	B27,$R1,B17:B16		; MPY32U	$H0,$R1,B17:B16
||	ADDU	$D0,$D1:$H1,B25:B24	; ADDU		$D0,$D1:$H1,$D1:$H1
||	ADDU	$D2,B28,$D2:$H2		; h2+=inp[2]
||	SWAP2	$D3,$D3
	MPY32U	$H0,$R2,A19:A18
||	MPY32U	B27,$R3,B19:B18		; MPY32U	$H0,$R3,B19:B18
||	ADD	$D0,$H1,A24		; A-copy of B24
||	SWAP4	$D3,$D3
|| [A2]	SUB	A2,1,A2			; decrement loop counter

	MPY32U	A24,$S3,A21:A20		; MPY32U	$H1,$S3,A21:A20
||	MPY32U	B24,$R0b,B21:B20	; MPY32U	$H1,$R0,B21:B20
||	ADDU	B25,$D2:$H2,$D2:$H2	; ADDU		$D1,$D2:$H2,$D2:$H2
||	ADDU	$D3,B30,$D3:$H3		; h3+=inp[3]
||	ADD	B25,$H2,B25		; B-copy of $H2
	MPY32U	A24,$R1,A23:A22		; MPY32U	$H1,$R1,A23:A22
||	MPY32U	B24,$R2,B23:B22		; MPY32U	$H1,$R2,B23:B22

	MPY32U	$H2,$S2,A25:A24
||	MPY32U	B25,$S3b,B25:B24	; MPY32U	$H2,$S3,B25:B24
||	ADDU	$D2,$D3:$H3,$D3:$H3
||	ADD	$PADBIT,$H4,$H4		; h4+=padbit
	MPY32U	$H2,$R0,A27:A26
||	MPY32U	$H2,$R1,B27:B26
||	ADD	$D3,$H4,$H4
||	MV	$S2,$S2a

	MPY32U	$H3,$S1,A29:A28
||	MPY32U	$H3,$S2,B29:B28
||	ADD	A21,A17,A21		; start accumulating "d3:d0"
||	ADD	B21,B17,B21
||	ADDU	A20,A16,A17:A16
||	ADDU	B20,B16,B17:B16
|| [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
	MPY32U	$H3,$S3,A31:A30
||	MPY32U	$H3,$R0b,B31:B30
||	ADD	A23,A19,A23
||	ADD	B23,B19,B23
||	ADDU	A22,A18,A19:A18
||	ADDU	B22,B18,B19:B18
|| [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	MPY32	$H4,$S1,B20
||	MPY32	$H4,$S2a,A20
||	ADD	A25,A21,A21
||	ADD	B25,B21,B21
||	ADDU	A24,A17:A16,A17:A16
||	ADDU	B24,B17:B16,B17:B16
|| [A2]	LDNW	*${INPB}[-2],$D2	; load inp[2]
	MPY32	$H4,$S3b,B22
||	ADD	A27,A23,A23
||	ADD	B27,B23,B23
||	ADDU	A26,A19:A18,A19:A18
||	ADDU	B26,B19:B18,B19:B18
|| [A2]	LDNW	*${INPB}[-1],$D3	; load inp[3]

	MPY32	$H4,$R0b,$H4
||	ADD	A29,A21,A21		; final hi("d0")
||	ADD	B29,B21,B21		; final hi("d1")
||	ADDU	A28,A17:A16,A17:A16	; final lo("d0")
||	ADDU	B28,B17:B16,B17:B16
	ADD	A31,A23,A23		; final hi("d2")
||	ADD	B31,B23,B23		; final hi("d3")
||	ADDU	A30,A19:A18,A19:A18
||	ADDU	B30,B19:B18,B19:B18
	ADDU	B20,B17:B16,B17:B16	; final lo("d1")
||	ADDU	A20,A19:A18,A19:A18	; final lo("d2")
	ADDU	B22,B19:B18,B19:B18	; final lo("d3")

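; "Flatten" the 64-bit column sums into 32-bit limbs by carrying each
; high half into the next column: h0 = lo(d0), h1 = lo(d1 + hi(d0)),
; h2 = lo(d2 + hi(d1)), h3 = lo(d3 + hi(d2)), h4 += hi(d3).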
	ADD	A17,A21,A21		; "flatten" "d3:d0"
||	MV	A19,B29			; move to avoid cross-path stalls
	ADDU	A21,B17:B16,B27:B26	; B26 is h1
	ADD	B21,B27,B27
||	DMV	B29,A18,B29:B28		; move to avoid cross-path stalls
	ADDU	B27,B29:B28,B29:B28	; B28 is h2
|| [A2]	SWAP2	$D0,$D0
	ADD	A23,B29,B29
|| [A2]	SWAP4	$D0,$D0
	ADDU	B29,B19:B18,B31:B30	; B30 is h3
	ADD	B23,B31,B31
||	MV	A16,B24			; B24 is h0
|| [A2]	SWAP2	$D1,$D1
	ADD	B31,$H4,$H4
|| [A2]	SWAP4	$D1,$D1

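; Partial reduction: write h4 = 4*q + t. Since q*2^130 = 5*q mod
; 2^130-5, q is folded back into h0 as 5*q (ADDAW computes
; B16 + 4*B16 = 5*B16) while only t = h4 & 3 stays in h4.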
	SHRU	$H4,2,B16		; last reduction step
||	AND	$H4,$THREE,$H4
	ADDAW	B16,B16,B16		; 5*(h4>>2)
|| [A2]	BNOP	loop?

	ADDU	B24,B16,B25:B24		; B24 is h0
|| [A2]	SWAP2	$D2,$D2
	ADDU	B26,B25,B27:B26		; B26 is h1
|| [A2]	SWAP4	$D2,$D2
	ADDU	B28,B27,B29:B28		; B28 is h2
|| [A2]	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
|| [A2]	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
	ADDU	B30,B29,B31:B30		; B30 is h3
	ADD	B31,$H4,$H4
|| [A2]	ADDU	$D1,B26,$D1:$H1		; h1+=inp[1]
;;===== branch to loop? is taken here

	LDDW	*FP[-4],A11:A10		; ABI says so
	LDDW	*FP[-3],A13:A12
||	LDDW	*SP[3],B11:B10
	LDDW	*SP[4],B13:B12
||	MV	B26,B25
||	BNOP	RA
	LDW	*++SP(40),FP		; restore frame pointer
||	MV	B30,B29
	STDW	B25:B24,*${CTXA}[0]	; save h1:h0
	STDW	B29:B28,*${CTXA}[1]	; save h3:h2
	STW	$H4,*${CTXA}[4]		; save h4
	NOP	1
	.endasmfunc
___
{
my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);

$code.=<<___;
	.global	_poly1305_emit
	.align	32
_poly1305_emit:
	.asmfunc
	LDDW	*${CTXA}[0],A17:A16	; load h1:h0
	LDDW	*${CTXA}[1],A19:A18	; load h3:h2
	LDW	*${CTXA}[4],A20		; load h4
	MV	$NONCEA,$NONCEB

	MVK	5,A22			; compare to modulus
	ADDU	A16,A22,A23:A22
||	LDW	*${NONCEA}[0],A8
||	LDW	*${NONCEB}[1],B8
	ADDU	A17,A23,A25:A24
||	LDW	*${NONCEA}[2],A9
||	LDW	*${NONCEB}[3],B9
	ADDU	A18,A25,A27:A26
	ADDU	A19,A27,A29:A28
	ADD	A20,A29,A29

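; A22 through A28 now hold h+5. If that sum reaches bit 130 (checked
; below via A29>>2), then h >= 2^130-5 and the low 130 bits of h+5 are
; the reduced residue, so the h+5 copy is selected; otherwise the
; original h is already fully reduced.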
	SHRU	A29,2,A2		; check for overflow in 130-th bit

   [A2]	MV	A22,A16			; select
|| [A2]	MV	A24,A17
   [A2]	MV	A26,A18
|| [A2]	MV	A28,A19

	ADDU	A8,A16,A23:A22		; accumulate nonce
	ADDU	B8,A17,A25:A24
||	SWAP2	A22,A22
	ADDU	A23,A25:A24,A25:A24
	ADDU	A9,A18,A27:A26
||	SWAP2	A24,A24
	ADDU	A25,A27:A26,A27:A26
||	ADD	B9,A19,A28
	ADD	A27,A28,A28
||	SWAP2	A26,A26

	.if	.BIG_ENDIAN
	SWAP2	A28,A28
||	SWAP4	A22,A22
||	SWAP4	A24,B24
	SWAP4	A26,A26
	SWAP4	A28,A28
||	MV	B24,A24
	.endif

	BNOP	RA,1
	STNW	A22,*${MAC}[0]		; write the result
	STNW	A24,*${MAC}[1]
	STNW	A26,*${MAC}[2]
	STNW	A28,*${MAC}[3]
	.endasmfunc
___
}
$code.=<<___;
	.sect	.const
	.cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
	.align	4
___

print $code;