Lines matching refs: Xi
145 static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256]) in gcm_gmult_8bit()
148 const u8 *xi = (const u8 *)Xi+15; in gcm_gmult_8bit()
221 if ((u8 *)Xi==xi) break; in gcm_gmult_8bit()
236 Xi[0] = BSWAP8(Z.hi); in gcm_gmult_8bit()
237 Xi[1] = BSWAP8(Z.lo); in gcm_gmult_8bit()
239 u8 *p = (u8 *)Xi; in gcm_gmult_8bit()
248 Xi[0] = Z.hi; in gcm_gmult_8bit()
249 Xi[1] = Z.lo; in gcm_gmult_8bit()
252 #define GCM_MUL(ctx,Xi) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable) argument
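The ctx->Xi referenced by these GCM_MUL macros is the running GHASH accumulator in the GCM context. The listing shows it accessed as 64-bit words (Xi.u), bytes (Xi.c) and machine words (Xi.t), which is possible because Xi is a 16-byte union in the context. A reduced sketch of that layout, with the surrounding context struct trimmed to the one member that matters here:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;
    typedef uint8_t  u8;

    struct gcm_state_sketch {              /* reduced stand-in for the GCM context */
        union {
            u64    u[2];                   /* word view: Xi.u[0], Xi.u[1]           */
            u32    d[4];
            u8     c[16];                  /* byte view used when absorbing input   */
            size_t t[16 / sizeof(size_t)]; /* machine-word view used in fast paths  */
        } Xi;                              /* running GHASH value                   */
    };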
335 static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) in gcm_gmult_4bit()
342 nlo = ((const u8 *)Xi)[15]; in gcm_gmult_4bit()
363 nlo = ((const u8 *)Xi)[cnt]; in gcm_gmult_4bit()
381 Xi[0] = BSWAP8(Z.hi); in gcm_gmult_4bit()
382 Xi[1] = BSWAP8(Z.lo); in gcm_gmult_4bit()
384 u8 *p = (u8 *)Xi; in gcm_gmult_4bit()
393 Xi[0] = Z.hi; in gcm_gmult_4bit()
394 Xi[1] = Z.lo; in gcm_gmult_4bit()
406 static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],
417 nlo = ((const u8 *)Xi)[15];
439 nlo = ((const u8 *)Xi)[cnt];
513 nlo = ((const u8 *)Xi)[cnt];
531 nlo = ((const u8 *)Xi)[0];
551 Xi[0] = BSWAP8(Z.hi);
552 Xi[1] = BSWAP8(Z.lo);
554 u8 *p = (u8 *)Xi;
563 Xi[0] = Z.hi;
564 Xi[1] = Z.lo;
570 void gcm_gmult_4bit(u64 Xi[2],const u128 Htable[16]);
571 void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
574 #define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable) argument
576 #define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
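The GHASH macro is the block-at-a-time form of GCM_MUL: every 16-byte block of input is xored into Xi and the result is multiplied by H in GF(2^128). A minimal reference sketch of that equivalence, with gmult standing in for whichever gcm_gmult_* routine is active (the real routines also take the precomputed Htable):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint8_t  u8;

    /* Reference behaviour of the gcm_ghash_* routines: fold each block
     * into Xi, then multiply Xi by H in GF(2^128). */
    static void ghash_ref(u64 Xi[2], const u8 *inp, size_t len,
                          void (*gmult)(u64 Xi[2]))
    {
        u8 *xi = (u8 *)Xi;
        while (len >= 16) {
            for (int i = 0; i < 16; ++i)
                xi[i] ^= inp[i];          /* Xi ^= next 16-byte block */
            gmult(Xi);                    /* Xi  = Xi * H             */
            inp += 16;
            len -= 16;
        }
    }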
585 static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2])
590 const long *xi = (const long *)Xi;
625 Xi[0] = BSWAP8(Z.hi);
626 Xi[1] = BSWAP8(Z.lo);
628 u8 *p = (u8 *)Xi;
637 Xi[0] = Z.hi;
638 Xi[1] = Z.lo;
641 #define GCM_MUL(ctx,Xi) gcm_gmult_1bit(ctx->Xi.u,ctx->H.u) argument
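gcm_gmult_1bit is the textbook bit-serial multiply from the GCM specification: walk the 128 bits of Xi from the most significant end, conditionally accumulate H, and reduce by the GCM polynomial on every shift. A self-contained sketch of that algorithm, not OpenSSL's code, ignoring the BSWAP8/byte-order handling visible in the lines above:

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } gf128;   /* hi holds the 64 leading bits */

    /* Z = X * H in GF(2^128) with the GCM reduction polynomial. */
    static gf128 gf128_mul_bitwise(gf128 X, gf128 H)
    {
        gf128 Z = { 0, 0 }, V = H;

        for (int i = 0; i < 128; ++i) {
            uint64_t xbit = (i < 64) ? (X.hi >> (63 - i)) & 1
                                     : (X.lo >> (127 - i)) & 1;
            if (xbit) {                  /* Z ^= V when the current bit is set */
                Z.hi ^= V.hi;
                Z.lo ^= V.lo;
            }
            /* V = V * x: shift right one bit, reduce if a bit falls off. */
            uint64_t carry = V.lo & 1;
            V.lo = (V.lo >> 1) | (V.hi << 63);
            V.hi >>= 1;
            if (carry)
                V.hi ^= 0xE100000000000000ULL;   /* GCM reduction constant */
        }
        return Z;
    }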
654 void gcm_init_clmul(u128 Htable[16],const u64 Xi[2]);
655 void gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]);
656 void gcm_ghash_clmul(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
660 void gcm_gmult_4bit_mmx(u64 Xi[2],const u128 Htable[16]);
661 void gcm_ghash_4bit_mmx(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
663 void gcm_gmult_4bit_x86(u64 Xi[2],const u128 Htable[16]);
664 void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
675 void gcm_init_neon(u128 Htable[16],const u64 Xi[2]);
676 void gcm_gmult_neon(u64 Xi[2],const u128 Htable[16]);
677 void gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
678 void gcm_init_v8(u128 Htable[16],const u64 Xi[2]);
679 void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
680 void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
687 # define GCM_MUL(ctx,Xi) (*gcm_gmult_p)(ctx->Xi.u,ctx->Htable) argument
690 # define GHASH(ctx,in,len) (*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)
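Here GCM_MUL and GHASH dispatch through the gcm_gmult_p/gcm_ghash_p function pointers taken from the context, so the per-block loops never test CPU features. A minimal sketch of the selection idea; the struct and probe names are invented for illustration, and the real choice is made once when the context is initialised:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint8_t  u8;
    typedef struct { u64 hi, lo; } u128;

    typedef void (*gmult_fn)(u64 Xi[2], const u128 Htable[16]);
    typedef void (*ghash_fn)(u64 Xi[2], const u128 Htable[16],
                             const u8 *inp, size_t len);

    struct ghash_funcs {                  /* illustrative slice of the GCM context */
        gmult_fn gmult;
        ghash_fn ghash;
    };

    /* Pick a multiply/hash pair once; GCM_MUL/GHASH then call through it. */
    static void select_ghash(struct ghash_funcs *f, int have_carryless_mul,
                             gmult_fn fast_gmult, ghash_fn fast_ghash,
                             gmult_fn table_gmult, ghash_fn table_ghash)
    {
        if (have_carryless_mul) {         /* e.g. PCLMULQDQ or ARMv8 PMULL */
            f->gmult = fast_gmult;
            f->ghash = fast_ghash;
        } else {                          /* portable 4-bit table code     */
            f->gmult = table_gmult;
            f->ghash = table_ghash;
        }
    }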
780 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
785 ctx->Xi.u[0] = 0;
786 ctx->Xi.u[1] = 0;
859 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
861 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
876 ctx->Xi.c[n] ^= *(aad++);
880 if (n==0) GCM_MUL(ctx,Xi);
895 for (i=0; i<16; ++i) ctx->Xi.c[i] ^= aad[i];
896 GCM_MUL(ctx,Xi);
903 for (i=0; i<len; ++i) ctx->Xi.c[i] ^= aad[i];
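These lines are the AAD-absorption path: each additional-authenticated-data byte is xored into Xi.c at a running offset, and once a full 16-byte block has accumulated, Xi is multiplied by H. A reduced sketch of that flow, with the offset bookkeeping and length counters kept in the real context simplified away and gmult standing for GCM_MUL:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t u8;

    /* Absorb AAD into the 16-byte hash state Xi.  'n' is the partial-block
     * offset carried between calls. */
    static size_t gcm_aad_sketch(u8 Xi[16], size_t n,
                                 const u8 *aad, size_t len,
                                 void (*gmult)(u8 Xi[16]))
    {
        while (len--) {
            Xi[n] ^= *(aad++);            /* fold one AAD byte into Xi      */
            n = (n + 1) % 16;
            if (n == 0)
                gmult(Xi);                /* full block absorbed: Xi *= H   */
        }
        return n;                         /* remember the partial offset    */
    }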
921 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
923 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
938 GCM_MUL(ctx,Xi);
956 ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n];
960 if (n==0) GCM_MUL(ctx,Xi);
1038 ctx->Xi.t[i] ^=
1040 GCM_MUL(ctx,Xi);
1058 ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n];
1080 ctx->Xi.c[n] ^= out[i] = in[i]^ctx->EKi.c[n];
1083 GCM_MUL(ctx,Xi);
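In the encrypt path the ciphertext is what gets authenticated, so each output byte is produced and folded into Xi in a single expression: the keystream byte EKi.c[n] is xored into the plaintext, stored to out, and the same value is xored into Xi.c[n]. A byte-wise sketch of one full block of that loop, with counter handling and keystream generation omitted and EKi standing for the encrypted counter block:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t u8;

    /* One 16-byte chunk of the byte-wise encrypt loop: XOR the keystream
     * block EKi into the plaintext and hash the resulting ciphertext. */
    static void gcm_encrypt_block_sketch(u8 Xi[16], const u8 EKi[16],
                                         const u8 *in, u8 *out,
                                         void (*gmult)(u8 Xi[16]))
    {
        for (size_t n = 0; n < 16; ++n)
            Xi[n] ^= out[n] = in[n] ^ EKi[n];   /* encrypt, then authenticate */
        gmult(Xi);                              /* Xi *= H after a full block */
    }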
1101 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
1103 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
1115 GCM_MUL(ctx,Xi);
1135 ctx->Xi.c[n] ^= c;
1139 if (n==0) GCM_MUL (ctx,Xi);
1217 ctx->Xi.t[i] ^= c;
1219 GCM_MUL(ctx,Xi);
1238 ctx->Xi.c[n] ^= c;
1264 ctx->Xi.c[n] ^= c;
1267 GCM_MUL(ctx,Xi);
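The decrypt path mirrors it, except the byte folded into Xi is the incoming ciphertext, captured before the keystream is stripped, so the tag is always computed over ciphertext in both directions. A matching sketch under the same assumptions:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t u8;

    /* Decrypt counterpart of the sketch above: hash the ciphertext byte
     * first, then strip the keystream to recover the plaintext. */
    static void gcm_decrypt_block_sketch(u8 Xi[16], const u8 EKi[16],
                                         const u8 *in, u8 *out,
                                         void (*gmult)(u8 Xi[16]))
    {
        for (size_t n = 0; n < 16; ++n) {
            u8 c = in[n];                 /* incoming ciphertext byte       */
            Xi[n] ^= c;                   /* authenticate the ciphertext    */
            out[n] = c ^ EKi[n];          /* decrypt                        */
        }
        gmult(Xi);                        /* Xi *= H after the full block   */
    }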
1284 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
1286 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
1298 GCM_MUL(ctx,Xi);
1314 ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n];
1318 if (n==0) GCM_MUL(ctx,Xi);
1362 for (i=0;i<16;++i) ctx->Xi.c[i] ^= out[i];
1363 GCM_MUL(ctx,Xi);
1380 ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n];
1399 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
1401 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
1413 GCM_MUL(ctx,Xi);
1431 ctx->Xi.c[n] ^= c;
1435 if (n==0) GCM_MUL (ctx,Xi);
1467 for (k=0;k<16;++k) ctx->Xi.c[k] ^= in[k];
1468 GCM_MUL(ctx,Xi);
1501 ctx->Xi.c[n] ^= c;
1518 void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
1522 GCM_MUL(ctx,Xi);
1539 ctx->Xi.u[0] ^= alen;
1540 ctx->Xi.u[1] ^= clen;
1541 GCM_MUL(ctx,Xi);
1543 ctx->Xi.u[0] ^= ctx->EK0.u[0];
1544 ctx->Xi.u[1] ^= ctx->EK0.u[1];
1546 if (tag && len<=sizeof(ctx->Xi))
1547 return memcmp(ctx->Xi.c,tag,len);
1555 memcpy(tag, ctx->Xi.c, len<=sizeof(ctx->Xi.c)?len:sizeof(ctx->Xi.c));
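These final references are the tag computation in CRYPTO_gcm128_finish: the bit lengths of the AAD and the ciphertext are xored into Xi, one last multiply by H is done, and the result is masked with the encrypted pre-counter block EK0 before being compared with (or copied out as) the tag. A condensed sketch of those steps, with the byte-swapping of the length words omitted and gmult standing for GCM_MUL:

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t u64;
    typedef uint8_t  u8;

    /* Final GHASH step and tag check; alen/clen are the AAD and ciphertext
     * lengths in bits, already in the byte order the hash expects. */
    static int gcm_finish_sketch(u64 Xi[2], const u64 EK0[2],
                                 u64 alen, u64 clen,
                                 const u8 *tag, size_t taglen,
                                 void (*gmult)(u64 Xi[2]))
    {
        Xi[0] ^= alen;                    /* fold in len(AAD) in bits        */
        Xi[1] ^= clen;                    /* fold in len(ciphertext) in bits */
        gmult(Xi);                        /* last multiply by H              */
        Xi[0] ^= EK0[0];                  /* mask with E(K, Y0)              */
        Xi[1] ^= EK0[1];
        return taglen <= 16 ? memcmp(Xi, tag, taglen) : -1;
    }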
1909 void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],